diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..b79637eafe761be6070b42da8e5a7d10193671f2 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.o filter=lfs diff=lfs merge=lfs -text
+*.so filter=lfs diff=lfs merge=lfs -text
+*.1 filter=lfs diff=lfs merge=lfs -text
+*.2 filter=lfs diff=lfs merge=lfs -text
+*.0 filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.doubango filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..54f0beb09699c83e7b03a1babc01ff2f1152db09
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+.idea
+__pycache__/
+python/__pycache__/
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..2afa75864315c562714dfab32e492f86dd26cd06
--- /dev/null
+++ b/app.py
@@ -0,0 +1,70 @@
+import os
+import subprocess
+import glob
+import gradio as gr
+from gradio.components import Gallery, Video, Textbox
+from classificatsion_video_demo import process
+
+
+def get_car_numbers(result_folder):
+    # Run the ALPR license-plate recognizer over result_folder; must run from the SDK binaries dir so shared libs resolve.
+    current_dir = os.getcwd()
+    os.chdir("binaries/linux/x86_64")
+
+    # Build the recognizer command (the commented lines below were a one-time native build step, kept for reference).
+    # command = "python ../../../python/setup.py build_ext --inplace -v"
+    # process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE)
+    command = "PYTHONPATH=$PYTHONPATH:.:../../../python LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH python ../../../scripts/alpr/license_plate_recognizer.py --image ../../../"+result_folder+" --assets ../../../assets"
+    process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE)
+
+    # Send a newline to stdin to answer the recognizer's "press any key" style prompt.
+    process.communicate(input='\n'.encode())
+
+    # communicate() already waits for exit; wait() here is a harmless no-op. Then restore the original cwd.
+    process.wait()
+    os.chdir(current_dir)
+    return 0
+
+def predict(video_path):
+    """
+    Extract problem frames from the video uploaded via the Gradio interface and return them for the result Gallery.
+    :param video_path: path to the uploaded video file
+    :return: (total frame count, problem frame count, good frame count, list of result image paths)
+    """
+    # Clear leftover result images from the previous run before processing.
+    # print(video_path)
+    _path = "assets/result_imgs"
+    images = glob.glob(f'{_path}/*.jpg')
+    for image in images:
+        os.remove(image)
+    problem, good, result = process(video_path)
+    images = glob.glob(f'{_path}/*.jpg')
+
+    get_car_numbers(result)
+    # # selected_images = [images[0],images[len(images)//2],images[-1]]
+    return problem+good, problem, good,images
+    # return 0, 0, 0, images
+
+
+my_example = [
+    # ['video/vid_39_1284-2_1202.mp4']
+    # ['video/vid_39_1284-2_1174.mp4']
+]
+
+my_title = "Video Klassifikatsiya"
+my_description = "128-4 qoida `To’xtash chizig’ini bosish` bo'yicha video analiz"
+all_frame = Textbox(label="Umumiy framelar soni")
+problem_frame = Textbox(label="Muammoli framelar soni")
+good_frame = Textbox(label="Muammosiz framelar soni")
+
+problem_frames = Gallery(label="Muammoli rasmlar", elem_id="gallery").style(
+    grid=[3], height="auto"
+)
+input_video = Video(label="Kiruvchi video")  # input video component fed to predict()
+
+gr.Interface(fn=predict,
+             inputs=input_video,
+             outputs=[all_frame,problem_frame,good_frame,problem_frames],
+             title=my_title,
+             examples=my_example,
+             description=my_description).launch()
diff --git a/assets/README.md b/assets/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3db202821bfe4e9f961e82e1a668b6b0649e179b
--- /dev/null
+++ b/assets/README.md
@@ -0,0 +1,9 @@
+- [fonts](fonts): Fonts used to display the license plates. You don't need these files in your application.
+- [images](images): Sample images. You don't need these files in your application.
+- [models](models): Tensorflow models. Not all files in this folder are required. Check [models/README.md](models/README.md).
+- [models.amlogic_npu](models.amlogic_npu): [Amlogic NPU](https://www.amlogic.com/) models. Not all files in this folder are required. Check [models.amlogic_npu/README.md](models.amlogic_npu/README.md).
+- [models.openvino](models.openvino): [Intel OpenVINO](https://docs.openvinotoolkit.org/latest/index.html) models. Not all files in this folder are required. Check [models.openvino/README.md](models.openvino/README.md).
+- [models.tensorrt](models.tensorrt): [NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) models. Not all files in this folder are required. Check [models.tensorrt/README.md](models.tensorrt/README.md).
+
+
+
diff --git a/assets/fonts/GlNummernschildEng-XgWd-license.txt b/assets/fonts/GlNummernschildEng-XgWd-license.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6b942eebc6fe66b4966d21d4e073866fa9fc2ed7
--- /dev/null
+++ b/assets/fonts/GlNummernschildEng-XgWd-license.txt
@@ -0,0 +1,8 @@
+https://www.wfonts.com/font/gl-nummernschild-eng
+
+GL-Nummernschild-EngmediumCLP-GL-Nummernschild-EngGL-Nummernschild-EngVersion 20110112 GL-Nummernschild-EngGutenberg Labo
+http://gutenberg.sourceforge.jp/These fonts are free softwares.
+
+Unlimited permission is granted to use, copy, and distribute it, with or without modification, either commercially and noncommercially.
+
+THESE FONTS ARE PROVIDED ''AS IS'' WITHOUT WARRANTY.
\ No newline at end of file
diff --git a/assets/fonts/GlNummernschildEng-XgWd.ttf b/assets/fonts/GlNummernschildEng-XgWd.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..0c9f17697c4cbf53fb890bc35caa00004dc07cb6
Binary files /dev/null and b/assets/fonts/GlNummernschildEng-XgWd.ttf differ
diff --git a/assets/images/china.jpg b/assets/images/china.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..88797e158beefae3c7451c7411976c56b5602af1
Binary files /dev/null and b/assets/images/china.jpg differ
diff --git a/assets/images/korea.jpg b/assets/images/korea.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d8725701dba809e7c61256ae7d756551158745a6
Binary files /dev/null and b/assets/images/korea.jpg differ
diff --git a/assets/images/lic_us_1280x720.jpg b/assets/images/lic_us_1280x720.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c75db8ef22d5a858a866d30334bd1d9c397e0ec8
Binary files /dev/null and b/assets/images/lic_us_1280x720.jpg differ
diff --git a/assets/images/london_traffic.jpg b/assets/images/london_traffic.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f2007a1859d0fe39a98c94da5e7d2d7ff7910e1e
Binary files /dev/null and b/assets/images/london_traffic.jpg differ
diff --git a/assets/images/multi.jpg b/assets/images/multi.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4cb8f1f568f56043af18b6aad1f6f9aa0c7dbea6
Binary files /dev/null and b/assets/images/multi.jpg differ
diff --git a/assets/images/nyc.jpg b/assets/images/nyc.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..808a0c994d24adcc82275e3cd5b1ef273c56eb02
Binary files /dev/null and b/assets/images/nyc.jpg differ
diff --git a/assets/models.amlogic_npu/PID0x7D/.gitkeep b/assets/models.amlogic_npu/PID0x7D/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/assets/models.amlogic_npu/PID0x88/.gitkeep b/assets/models.amlogic_npu/PID0x88/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_detect_main.mobile.model.amlogic.doubango b/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_detect_main.mobile.model.amlogic.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..d1a697a7308f1b4e8c69b29a01695ff21b65042b
--- /dev/null
+++ b/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_detect_main.mobile.model.amlogic.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afc5d16038bc3483732c56ae78417c484d287116f836aa24fe4e74525f477382
+size 8408484
diff --git a/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_detect_pysearch.mobile.model.amlogic.doubango b/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_detect_pysearch.mobile.model.amlogic.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..199131473245b761f3780f5966bacfeb3c20615c
--- /dev/null
+++ b/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_detect_pysearch.mobile.model.amlogic.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:646e21f87a989a0616502e3293df4cf623bb73d9076ac6c2738af05180fb8134
+size 4993188
diff --git a/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_klass_lpci.mobile.model.amlogic.doubango b/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_klass_lpci.mobile.model.amlogic.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..34974966de38e45a0bdde733f6c020ae44439881
--- /dev/null
+++ b/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_klass_lpci.mobile.model.amlogic.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7dad948481a8648880f17d93e2ef2c9267c19da3f01f97ae62ca55cfcdc04fda
+size 8328382
diff --git a/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_klass_vbsr.mobile.model.amlogic.doubango b/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_klass_vbsr.mobile.model.amlogic.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..2a056c252fb6622e6e77f3e81d010c6865dbfc22
--- /dev/null
+++ b/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_klass_vbsr.mobile.model.amlogic.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a79bc89312f6626ef0e638d25b496e17c9377a640392e2dd0d4eef143ac89c2
+size 8192708
diff --git a/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_klass_vcr.mobile.model.amlogic.doubango b/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_klass_vcr.mobile.model.amlogic.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..aecc2e166c3fbaa2bcd90950a21bcd7fc4726988
--- /dev/null
+++ b/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_klass_vcr.mobile.model.amlogic.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f6b648572dcbd4cf6d0dd54001f3e39b2bd42cded3db00664b4b0465e7082bf
+size 8181700
diff --git a/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_klass_vmmr.mobile.model.amlogic.doubango b/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_klass_vmmr.mobile.model.amlogic.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..9247d63ebaba7be62ba98e23de46586e2855b2fe
--- /dev/null
+++ b/assets/models.amlogic_npu/PID0x88/ultimateALPR-SDK_klass_vmmr.mobile.model.amlogic.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27183f6b7344d5803919c5942397be43f91f1ceb4cf7086b3ab4be81ed547206
+size 10934206
diff --git a/assets/models.amlogic_npu/PID0x99/.gitkeep b/assets/models.amlogic_npu/PID0x99/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/assets/models.amlogic_npu/PID0xA1/.gitkeep b/assets/models.amlogic_npu/PID0xA1/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/assets/models.amlogic_npu/PID0xB9/.gitkeep b/assets/models.amlogic_npu/PID0xB9/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/assets/models.amlogic_npu/PID0xBE/.gitkeep b/assets/models.amlogic_npu/PID0xBE/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/assets/models.amlogic_npu/PID0xE8/.gitkeep b/assets/models.amlogic_npu/PID0xE8/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/assets/models.amlogic_npu/README.md b/assets/models.amlogic_npu/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3cad7dbe02c70345b4380aee6aa1d90f0bb41c7a
--- /dev/null
+++ b/assets/models.amlogic_npu/README.md
@@ -0,0 +1,51 @@
+
+- [Files](#files)
+ - [supported_hardware.txt](#files-supported_hardware)
+ - [pid_serial_mapping.txt](#pid_serial_mapping)
+- [Models-requirement](#models-requirement)
+
+
+
+Not all files in this folder are required. It depends on the [features](https://www.doubango.org/SDKs/anpr/docs/Features.html) you want to activate.
+
+**This entire folder is useless if your hardware isn't Amlogic or you've disabled NPU ([Neural Processing Unit](https://en.wikichip.org/wiki/neural_processor)) acceleration.**
+
+
+# Files
+
+
+## supported_hardware.txt
+[This file](supported_hardware.txt) contains the list of hardware names (case insensitive) for which NPU ([Neural Processing Unit](https://en.wikichip.org/wiki/neural_processor)) acceleration will be enabled. Your hardware must be listed in this file.
+You can edit this file to include your hardware name. To get the name of your hardware: `cat /proc/cpuinfo | grep Hardware`.
+
+
+## pid_serial_mapping.txt
+[This file](pid_serial_mapping.txt) contains the mapping between the PID of the models and the 4-first digits of your serial number. More at [Model Transcoding and Running User Guide (1.0).pdf](https://github.com/khadas/aml_npu_sdk/blob/master/docs/en/Model%20Transcoding%20and%20Running%20User%20Guide%20(1.0).pdf) page 9.
+
+| PID | serial (4-first digits) |
+|-------- | --- |
+|0x7D|290a|
+|**0x88**|**290b**|
+|0x99|2b0a|
+|0xA1|300a|
+|0xA1|300b|
+|0x99|2f0a|
+|0xB9|2f0b|
+|0xBE|330a|
+|0xBE|330b|
+|0xE8|380a|
+|0xE8|380b|
+
+When I run `cat /proc/cpuinfo | grep Serial` on my Khadas VIM3 I get `Serial : 290b100001111500001731343258****`, that means my 4-first digits are `290b` which means my PID is `0x88` which means my models will be inside [PID0x88](PID0x88) folder.
+Please contact us via the [dev-group](https://groups.google.com/g/doubango-ai) if the folder corresponding to your PID is empty.
+
+
+# Models requirement
+| Folder | Requirement |
+|-------- | --- |
+| ultimateALPR-SDK_detect_main.mobile.model.amlogic.doubango | **Always required when NPU acceleration is enabled** |
+| ultimateALPR-SDK_detect_pysearch.mobile.model.amlogic.doubango | **Always required when NPU acceleration is enabled** |
+| ultimateALPR-SDK_klass_lpci.mobile.model.amlogic.doubango | Only if NPU acceleration is enabled and you want [License Plate Country Identification (LPCI)](https://www.doubango.org/SDKs/anpr/docs/Features.html#license-plate-country-identification-lpci) |
+| ultimateALPR-SDK_klass_vbsr.mobile.model.amlogic.doubango | Only if NPU acceleration is enabled and you want [Vehicle Body Style Recognition (VBSR)](https://www.doubango.org/SDKs/anpr/docs/Features.html#vehicle-body-style-recognition-vbsr) |
+| ultimateALPR-SDK_klass_vcr.mobile.model.amlogic.doubango | Only if NPU acceleration is enabled and you want [Vehicle Color Recognition (VCR)](https://www.doubango.org/SDKs/anpr/docs/Features.html#vehicle-color-recognition-vcr) |
+| ultimateALPR-SDK_klass_vmmr.mobile.model.amlogic.doubango | Only if NPU acceleration is enabled and you want [Vehicle Make Model Recognition (VMMR)](https://www.doubango.org/SDKs/anpr/docs/Features.html#vehicle-make-model-recognition-vmmr) |
diff --git a/assets/models.amlogic_npu/pid_serial_mapping.txt b/assets/models.amlogic_npu/pid_serial_mapping.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5d9d34a8e00b8d3d524b65ee3b2d81e98e1d3abf
--- /dev/null
+++ b/assets/models.amlogic_npu/pid_serial_mapping.txt
@@ -0,0 +1,11 @@
+0x7D,290a
+0x88,290b
+0x99,2b0a
+0xA1,300a
+0xA1,300b
+0x99,2f0a
+0xB9,2f0b
+0xBE,330a
+0xBE,330b
+0xE8,380a
+0xE8,380b
diff --git a/assets/models.amlogic_npu/supported_hardware.txt b/assets/models.amlogic_npu/supported_hardware.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8a1dda8dff1fc3885358d03df184985f28a1011c
--- /dev/null
+++ b/assets/models.amlogic_npu/supported_hardware.txt
@@ -0,0 +1,2 @@
+Amlogic
+Khadas VIM3
\ No newline at end of file
diff --git a/assets/models/README.md b/assets/models/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3f35db8bfbdb0e22ffaae3fe6e73ade578e3c95e
--- /dev/null
+++ b/assets/models/README.md
@@ -0,0 +1,50 @@
+- Not all files in this folder are required. It depends on your CPU type.
+
+- All models listed here (**.model.doubango**) are useless on NVIDIA Jetson (ARM). The models for the NVIDIA Jetson devices are under [models.tensorrt](../models.tensorrt).
+
+# All charsets #
+
+| | ARM | x86 |comment
+|-------- | --- | --- | ---|
+| ultimateALPR-SDK_detect_main.desktop.model.doubango | No | **Yes** | **Always required** |
+| ultimateALPR-SDK_detect_pysearch.desktop.model.doubango | No | **Yes** | **Always required** |
+| ultimateALPR-SDK_detecti_main.mobile.model.doubango | **Yes** | No | **Always required** |
+| ultimateALPR-SDK_detecti_pysearch.mobile.model.doubango | **Yes** | No | **Always required** |
+| ultimateALPR-SDK_klass_labels_lpci.txt.doubango | **Yes** | **Yes** | Only if you want [License Plate Country Identification (LPCI)](https://www.doubango.org/SDKs/anpr/docs/Features.html#license-plate-country-identification-lpci) |
+| ultimateALPR-SDK_klass_lpci.desktop.model.doubango | No | **Yes** | Only if you want [License Plate Country Identification (LPCI)](https://www.doubango.org/SDKs/anpr/docs/Features.html#license-plate-country-identification-lpci) |
+| ultimateALPR-SDK_klassi_lpci.mobile.model.doubango | **Yes** | No | Only if you want [License Plate Country Identification (LPCI)](https://www.doubango.org/SDKs/anpr/docs/Features.html#license-plate-country-identification-lpci) |
+| ultimateALPR-SDK_klass_labels_vcr.txt.doubango | **Yes** | **Yes** | Only if you want [Vehicle Color Recognition (VCR)](https://www.doubango.org/SDKs/anpr/docs/Features.html#vehicle-color-recognition-vcr) |
+| ultimateALPR-SDK_klass_vcr.desktop.model.doubango | No | **Yes** | Only if you want [Vehicle Color Recognition (VCR)](https://www.doubango.org/SDKs/anpr/docs/Features.html#vehicle-color-recognition-vcr) |
+| ultimateALPR-SDK_klassi_vcr.mobile.model.doubango | **Yes** | No | Only if you want [Vehicle Color Recognition (VCR)](https://www.doubango.org/SDKs/anpr/docs/Features.html#vehicle-color-recognition-vcr) |
+| ultimateALPR-SDK_klass_labels_vmmr.txt.doubango | **Yes** | **Yes** | Only if you want [Vehicle Make Model Recognition (VMMR)](https://www.doubango.org/SDKs/anpr/docs/Features.html#vehicle-make-model-recognition-vmmr) |
+| ultimateALPR-SDK_klass_vmmr.desktop.model.doubango | No | **Yes** | Only if you want [Vehicle Make Model Recognition (VMMR)](https://www.doubango.org/SDKs/anpr/docs/Features.html#vehicle-make-model-recognition-vmmr) |
+| ultimateALPR-SDK_klassi_vmmr.mobile.model.doubango | **Yes** | No | Only if you want [Vehicle Make Model Recognition (VMMR)](https://www.doubango.org/SDKs/anpr/docs/Features.html#vehicle-make-model-recognition-vmmr) |
+| ultimateALPR-SDK_klass_labels_vbsr.txt.doubango | **Yes** | **Yes** | Only if you want [Vehicle Body Style Recognition (VBSR)](https://www.doubango.org/SDKs/anpr/docs/Features.html#vehicle-body-style-recognition-vbsr) |
+| ultimateALPR-SDK_klass_vbsr.desktop.model.doubango | No | **Yes** | Only if you want [Vehicle Body Style Recognition (VBSR)](https://www.doubango.org/SDKs/anpr/docs/Features.html#vehicle-body-style-recognition-vbsr) |
+| ultimateALPR-SDK_klassi_vbsr.mobile.model.doubango | **Yes** | No | Only if you want [Vehicle Body Style Recognition (VBSR)](https://www.doubango.org/SDKs/anpr/docs/Features.html#vehicle-body-style-recognition-vbsr) |
+
+
+# Latin ([A-Z0-9]) charsets #
+
+| | ARM | x86 |
+|-------- | --- | --- |
+| charset_anpr_latin_size=37.txt | **Yes** | **Yes** |
+| ultimateALPR-SDK_recogn2x150_latin.desktop.model.doubango | No | **Yes** |
+| ultimateALPR-SDK_recogn1x100_latin.desktop.model.doubango | **Yes** | No |
+
+# Korean charsets #
+
+| | ARM | x86 |
+|-------- | --- | --- |
+| charset_anpr_korean_size=78.txt | **Yes** | **Yes** |
+| ultimateALPR-SDK_recogn2x150_korean.desktop.model.doubango | No | **Yes** |
+| ultimateALPR-SDK_recogn1x100_korean.desktop.model.doubango | **Yes** | No |
+
+# Chinese charsets #
+
+| | ARM | x86 |
+|-------- | --- | --- |
+| charset_anpr_chinese_size=73.txt | **Yes** | **Yes** |
+| ultimateALPR-SDK_recogn2x150_chinese.desktop.model.doubango | No | **Yes** |
+| ultimateALPR-SDK_recogn1x100_chinese.desktop.model.doubango | **Yes** | No |
+
diff --git a/assets/models/charset_anpr_chinese_size=73.txt b/assets/models/charset_anpr_chinese_size=73.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bc75afe5b21313ad1ed4ba8d612b7b023e4718c4
--- /dev/null
+++ b/assets/models/charset_anpr_chinese_size=73.txt
@@ -0,0 +1,73 @@
+0
+1 0
+2 1
+3 2
+4 3
+5 4
+6 5
+7 6
+8 7
+9 8
+10 9
+11 A
+12 B
+13 C
+14 D
+15 E
+16 F
+17 G
+18 H
+19 I
+20 J
+21 K
+22 L
+23 M
+24 N
+25 O
+26 P
+27 Q
+28 R
+29 S
+30 T
+31 U
+32 V
+33 W
+34 X
+35 Y
+36 Z
+37 云
+38 京
+39 冀
+40 吉
+41 学
+42 宁
+43 川
+44 挂
+45 新
+46 晋
+47 桂
+48 沪
+49 津
+50 浙
+51 渝
+52 港
+53 湘
+54 澳
+55 琼
+56 甘
+57 皖
+58 粤
+59 苏
+60 蒙
+61 藏
+62 警
+63 豫
+64 贵
+65 赣
+66 辽
+67 鄂
+68 闽
+69 陕
+70 青
+71 鲁
+72 黑
diff --git a/assets/models/charset_anpr_korean_size=78.txt b/assets/models/charset_anpr_korean_size=78.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dc49dcb74021535d1c054c2ff8aadff323d1c5d9
--- /dev/null
+++ b/assets/models/charset_anpr_korean_size=78.txt
@@ -0,0 +1,78 @@
+0
+1 0
+2 1
+3 2
+4 3
+5 4
+6 5
+7 6
+8 7
+9 8
+10 9
+11 가
+12 강
+13 거
+14 경
+15 고
+16 광
+17 교
+18 구
+19 기
+20 나
+21 남
+22 너
+23 노
+24 누
+25 다
+26 대
+27 더
+28 도
+29 두
+30 라
+31 러
+32 령
+33 로
+34 루
+35 리
+36 린
+37 마
+38 머
+39 모
+40 무
+41 므
+42 바
+43 배
+44 버
+45 보
+46 부
+47 북
+48 사
+49 산
+50 서
+51 세
+52 소
+53 수
+54 아
+55 어
+56 오
+57 외
+58 우
+59 울
+60 원
+61 육
+62 이
+63 인
+64 자
+65 저
+66 전
+67 제
+68 조
+69 종
+70 주
+71 천
+72 충
+73 타
+74 하
+75 허
+76 호
+77 후
diff --git a/assets/models/charset_anpr_latin_size=37.txt b/assets/models/charset_anpr_latin_size=37.txt
new file mode 100644
index 0000000000000000000000000000000000000000..31844b3c2fa24911a34cb73667d349601731246d
--- /dev/null
+++ b/assets/models/charset_anpr_latin_size=37.txt
@@ -0,0 +1,37 @@
+0
+1 A
+2 B
+3 C
+4 D
+5 E
+6 F
+7 G
+8 H
+9 I
+10 J
+11 K
+12 L
+13 M
+14 N
+15 O
+16 P
+17 Q
+18 R
+19 S
+20 T
+21 U
+22 V
+23 W
+24 X
+25 Y
+26 Z
+27 0
+28 1
+29 2
+30 3
+31 4
+32 5
+33 6
+34 7
+35 8
+36 9
diff --git a/assets/models/ultimateALPR-SDK_detect_main.desktop.model.doubango b/assets/models/ultimateALPR-SDK_detect_main.desktop.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..a9b85a91c78c8905839fc87c2549eded52ef9712
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_detect_main.desktop.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47ec6c1af20fe8c88f2051e96a0e88ef5002689780b50b439a5a9ee241777e0f
+size 12945520
diff --git a/assets/models/ultimateALPR-SDK_detect_pysearch.desktop.model.doubango b/assets/models/ultimateALPR-SDK_detect_pysearch.desktop.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..595ea26160627bfc49636846aa41eb5453f33ca3
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_detect_pysearch.desktop.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee18f0e0399baa403c4c236f514272c8f6c1abf1e80bd4a89e907e0f21c999e1
+size 7728444
diff --git a/assets/models/ultimateALPR-SDK_detecti_main.mobile.model.doubango b/assets/models/ultimateALPR-SDK_detecti_main.mobile.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..e91816c4d2fe10295ae2028a1148ca0c990dc565
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_detecti_main.mobile.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fedb90d1eed6bdc0ff588fd067c3b704410a332ee54353cf9b4199cecb5501e
+size 3184964
diff --git a/assets/models/ultimateALPR-SDK_detecti_pysearch.mobile.model.doubango b/assets/models/ultimateALPR-SDK_detecti_pysearch.mobile.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..0f5c4f82c757872b2928c8c0f8ee4b117ba41cc8
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_detecti_pysearch.mobile.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:976c59328599fad64330526c500b83216dc532e635ba1de96cda4427d0bf6268
+size 1880124
diff --git a/assets/models/ultimateALPR-SDK_klass_labels_lpci.txt.doubango b/assets/models/ultimateALPR-SDK_klass_labels_lpci.txt.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..1af0eb3d41019dfb88e90bf396e091b78cf1c782
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_klass_labels_lpci.txt.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51359ceec576879dd7401482b2cc6842a954810b0c5b4596cfafa596a75c2bca
+size 3316
diff --git a/assets/models/ultimateALPR-SDK_klass_labels_vbsr.txt.doubango b/assets/models/ultimateALPR-SDK_klass_labels_vbsr.txt.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..3bfa5d5a68cd861b3d2faa916ed245c6af1154a3
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_klass_labels_vbsr.txt.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb3235b57c1de1593da248a190118250121a5b18a15619428bb4f7692d6f0579
+size 113
diff --git a/assets/models/ultimateALPR-SDK_klass_labels_vcr.txt.doubango b/assets/models/ultimateALPR-SDK_klass_labels_vcr.txt.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..c1b61fc2aa30205f49a90486c04816309e00747c
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_klass_labels_vcr.txt.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0723ee8feb78b9995d31537bdbd639df733b9867683064e3e86ab97a0e3f0cc
+size 100
diff --git a/assets/models/ultimateALPR-SDK_klass_labels_vmmr.txt.doubango b/assets/models/ultimateALPR-SDK_klass_labels_vmmr.txt.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..81b054112da9c8b670de512aaf32bbb6465f0f9c
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_klass_labels_vmmr.txt.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a4dfd2025144c7fab9c24843778cc5af5ec092a33c669ef70274c053f0d5044
+size 32644
diff --git a/assets/models/ultimateALPR-SDK_klass_lpci.desktop.model.doubango b/assets/models/ultimateALPR-SDK_klass_lpci.desktop.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..f148d97bbc09fbcab0ac31d077bc035d6ffa7dbd
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_klass_lpci.desktop.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67efc7a51445052e687e88c2ea12c998c92e8edc6c9fba152d12fae7576dee7d
+size 17663312
diff --git a/assets/models/ultimateALPR-SDK_klass_vbsr.desktop.model.doubango b/assets/models/ultimateALPR-SDK_klass_vbsr.desktop.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..10e02fcc3f66f45340f3fbd9efafa63a690cd86f
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_klass_vbsr.desktop.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af37faaee19068d224b1832c8d284e7e58382854fb19cfe141758eb357268fa7
+size 17141143
diff --git a/assets/models/ultimateALPR-SDK_klass_vcr.desktop.model.doubango b/assets/models/ultimateALPR-SDK_klass_vcr.desktop.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..a53ab14017ea04ec5781749f50e5e40d8ea48801
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_klass_vcr.desktop.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:938dfe36d5144a632c24f7fc7d0b55c23de23f48d28193069bc8e2d540fd1d2e
+size 17146267
diff --git a/assets/models/ultimateALPR-SDK_klass_vmmr.desktop.model.doubango b/assets/models/ultimateALPR-SDK_klass_vmmr.desktop.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..b3357550a1760b3ec378063f216f4f75079ee04e
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_klass_vmmr.desktop.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7dde0b60ee9ba4d9522b5000f461f55d1fd7d16589462393804d50f42a11638e
+size 27629497
diff --git a/assets/models/ultimateALPR-SDK_klassi_lpci.mobile.model.doubango b/assets/models/ultimateALPR-SDK_klassi_lpci.mobile.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..137e7c863f2744968339f7b23ad791914e65aeae
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_klassi_lpci.mobile.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cf468d2770a62a4ccc5f475c39b8e8a3fd79496c74d6c91889a1016ea40006d
+size 4917660
diff --git a/assets/models/ultimateALPR-SDK_klassi_vbsr.mobile.model.doubango b/assets/models/ultimateALPR-SDK_klassi_vbsr.mobile.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..a5fb78427dda0efa440dbfdeb33b53bad964c5a7
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_klassi_vbsr.mobile.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:262e93a92b6201b0c1e66057321d55b883bb1ac3a29d53504cc9993aea6ff6cf
+size 4772404
diff --git a/assets/models/ultimateALPR-SDK_klassi_vcr.mobile.model.doubango b/assets/models/ultimateALPR-SDK_klassi_vcr.mobile.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..75986ecaeab0161cd8e9c6e6a94b11ffd52dcdfb
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_klassi_vcr.mobile.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0dd460e76ffa5c1835952dbc8382567af9a889f095fb01396f704ff5e6f19e66
+size 4929260
diff --git a/assets/models/ultimateALPR-SDK_klassi_vmmr.mobile.model.doubango b/assets/models/ultimateALPR-SDK_klassi_vmmr.mobile.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..8e41a38e0aa5c17646f07d0e44b34dfad8a187db
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_klassi_vmmr.mobile.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f005e74ea379fe98ed0694aaa62d56ad9cb496a6b374d877896b29c9298c0e90
+size 7461724
diff --git a/assets/models/ultimateALPR-SDK_recogn1x100_chinese.desktop.model.doubango b/assets/models/ultimateALPR-SDK_recogn1x100_chinese.desktop.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..8c8dd14b32941b9d0320de496c0e1ab9b2fe2bdd
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_recogn1x100_chinese.desktop.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72a3ade8249af36a8d521a5803e57396f02cadf953233024d5a3fdcce49ecb0f
+size 7209129
diff --git a/assets/models/ultimateALPR-SDK_recogn1x100_korean.desktop.model.doubango b/assets/models/ultimateALPR-SDK_recogn1x100_korean.desktop.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..6206b52b192256239a9b21c5326d26776249f681
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_recogn1x100_korean.desktop.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3619f70cc09bafff1424fd267a6e68cf90e87f6c2d75a76a3b1fd75e1c443282
+size 7257531
diff --git a/assets/models/ultimateALPR-SDK_recogn1x100_latin.desktop.model.doubango b/assets/models/ultimateALPR-SDK_recogn1x100_latin.desktop.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..65a4ac08f5df51ee613c5197fd9a235daae3037a
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_recogn1x100_latin.desktop.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c363b8f74b8fa74c4bc01bb6cdd4991ce2c57d2b56d6a69646c948087aac9c48
+size 6995173
diff --git a/assets/models/ultimateALPR-SDK_recogn2x150_chinese.desktop.model.doubango b/assets/models/ultimateALPR-SDK_recogn2x150_chinese.desktop.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..778bd222675c10a8379ba199035ba54913eb6a65
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_recogn2x150_chinese.desktop.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74d859bc7b924f9e23776e2a33f82b16af20bfc110d028ebe1c2ddc93a0f1a56
+size 7249220
diff --git a/assets/models/ultimateALPR-SDK_recogn2x150_korean.desktop.model.doubango b/assets/models/ultimateALPR-SDK_recogn2x150_korean.desktop.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..f72a29ee54f4966741fb04fe808523be89af439c
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_recogn2x150_korean.desktop.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be4376f2a26f9d75bb8aa515db2255e22279ab4068758023322200a36c30abc4
+size 7297622
diff --git a/assets/models/ultimateALPR-SDK_recogn2x150_latin.desktop.model.doubango b/assets/models/ultimateALPR-SDK_recogn2x150_latin.desktop.model.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..806600e449284782044ac9023bd505786d14cfb4
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_recogn2x150_latin.desktop.model.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b77a83254e4cef1635b33c18aa9db0ddc47083f63dd2282de9bdb5b3087ed1e5
+size 7035264
diff --git a/assets/models/ultimateALPR-SDK_tensorflow_fp.txt.doubango b/assets/models/ultimateALPR-SDK_tensorflow_fp.txt.doubango
new file mode 100644
index 0000000000000000000000000000000000000000..4644a399dc63cf3648924f008bf10686d8e442c4
--- /dev/null
+++ b/assets/models/ultimateALPR-SDK_tensorflow_fp.txt.doubango
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45f54632372ae2c7a07e7b4ceb68b63e8484815540f11b091725c01f69c31400
+size 356
diff --git a/assets/result_imgs/china.jpg b/assets/result_imgs/china.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c4e23ca83c3c80649a822b23911833fbf128c4ec
Binary files /dev/null and b/assets/result_imgs/china.jpg differ
diff --git a/assets/result_imgs/korea.jpg b/assets/result_imgs/korea.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..91309653aa75d0c5720d0c44fcd99eab28a0e657
Binary files /dev/null and b/assets/result_imgs/korea.jpg differ
diff --git a/assets/result_imgs/lic_us_1280x720.jpg b/assets/result_imgs/lic_us_1280x720.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5f52819b2e48845636da8f2db4ccb83809c6fca0
Binary files /dev/null and b/assets/result_imgs/lic_us_1280x720.jpg differ
diff --git a/assets/result_imgs/london_traffic.jpg b/assets/result_imgs/london_traffic.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8407f39c510beccd68f6a61564534a8058528816
Binary files /dev/null and b/assets/result_imgs/london_traffic.jpg differ
diff --git a/assets/result_imgs/multi.jpg b/assets/result_imgs/multi.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..04a29ebb73a57b85ec8b6c7aeee1e0eb8174609f
Binary files /dev/null and b/assets/result_imgs/multi.jpg differ
diff --git a/assets/result_imgs/nyc.jpg b/assets/result_imgs/nyc.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a65de6d9756270c2fa882b396c0fbc69edc9ed17
Binary files /dev/null and b/assets/result_imgs/nyc.jpg differ
diff --git a/binaries/README.md b/binaries/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..856afc0ccce01b785094b0d383287430a3cfc30a
--- /dev/null
+++ b/binaries/README.md
@@ -0,0 +1,14 @@
+Binaries (shared libraries) for
+- [Android](android): arm64-v8a, armeabi-v7a, x86, x86_64
+- [iOS](ios): armv7s, arm64
+- [Raspberry Pi (Raspbian OS)](raspbian): armv7l
+- [Windows](windows): x86_64
+- [Linux](linux): x86_64, aarch64
+- [jetson](jetson): aarch64
+- [jetson_tftrt](jetson_tftrt): aarch64
+
+The difference between [jetson](jetson) and [jetson_tftrt](jetson_tftrt) is explained [here](../Jetson.md#getting-started_jetson-versus-jetsontftrt).
+
+OpenCL 1.2+ will be loaded at runtime (no linking).
+
+
diff --git a/binaries/linux/README.md b/binaries/linux/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0929eaf11a0c2ff63c5d352e026e766afa590325
--- /dev/null
+++ b/binaries/linux/README.md
@@ -0,0 +1,3 @@
+Linux binaries:
+- [x86_64](x86_64): Binaries built on **Ubuntu 18** for **x86_64** CPUs.
+- [aarch64](aarch64): Binaries built on **Ubuntu 18** for **AArch64** CPUs.
diff --git a/binaries/linux/x86_64/README.md b/binaries/linux/x86_64/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b07172fc20154b4e375a354bcac67c0ab9adf9c9
--- /dev/null
+++ b/binaries/linux/x86_64/README.md
@@ -0,0 +1,14 @@
+This folder contains binaries built on **Ubuntu 18** for **x86_64** CPUs.
+
+Result for `g++ -v`:
+```
+Using built-in specs.
+COLLECT_GCC=g++
+COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/7/lto-wrapper
+OFFLOAD_TARGET_NAMES=nvptx-none
+OFFLOAD_TARGET_DEFAULT=1
+Target: x86_64-linux-gnu
+Configured with: ../src/configure -v --with-pkgversion='Ubuntu 7.4.0-1ubuntu1~18.04.1' --with-bugurl=file:///usr/share/doc/gcc-7/README.Bugs --enable-languages=c,ada,c++,go,brig,d,fortran,objc,obj-c++ --prefix=/usr --with-gcc-major-version-only --program-suffix=-7 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --with-sysroot=/ --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-libmpx --enable-plugin --enable-default-pie --with-system-zlib --with-target-system-zlib --enable-objc-gc=auto --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu
+Thread model: posix
+gcc version 7.4.0 (Ubuntu 7.4.0-1ubuntu1~18.04.1)
+```
diff --git a/binaries/linux/x86_64/_ultimateAlprSdk.so b/binaries/linux/x86_64/_ultimateAlprSdk.so
new file mode 100755
index 0000000000000000000000000000000000000000..1a3d80177ff318679d9c12c84ea3304872353b7c
--- /dev/null
+++ b/binaries/linux/x86_64/_ultimateAlprSdk.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c37017b8ec7ea067c5d0975984da380fc4128c9e20295dc52f071254304a4f2b
+size 621920
diff --git a/binaries/linux/x86_64/benchmark b/binaries/linux/x86_64/benchmark
new file mode 100644
index 0000000000000000000000000000000000000000..a5c48b2cc310f107ffa7394b5e6ed6c578e2b327
Binary files /dev/null and b/binaries/linux/x86_64/benchmark differ
diff --git a/binaries/linux/x86_64/build/temp.linux-x86_64-cpython-38/home/pc-work/Documents/Cradle/ultimateALPR-SDK/python/ultimateALPR-SDK-API-PUBLIC-SWIG_python.o b/binaries/linux/x86_64/build/temp.linux-x86_64-cpython-38/home/pc-work/Documents/Cradle/ultimateALPR-SDK/python/ultimateALPR-SDK-API-PUBLIC-SWIG_python.o
new file mode 100644
index 0000000000000000000000000000000000000000..5c676ec7c2cbc84f22536d5e4070c8af7a0cfb90
--- /dev/null
+++ b/binaries/linux/x86_64/build/temp.linux-x86_64-cpython-38/home/pc-work/Documents/Cradle/ultimateALPR-SDK/python/ultimateALPR-SDK-API-PUBLIC-SWIG_python.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5af83304ed200d65ddd3999f2361a61b41b391f156c8a172f5df06c11780db3
+size 1265080
diff --git a/binaries/linux/x86_64/libGNAPlugin.so b/binaries/linux/x86_64/libGNAPlugin.so
new file mode 100644
index 0000000000000000000000000000000000000000..1e3fc51c4e875e98abab0c81d0fe97b06323c2bf
--- /dev/null
+++ b/binaries/linux/x86_64/libGNAPlugin.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ea0436e57210533ab02eafaf10cf95967fe61fc9b1b03180f59bf5eba147ac1
+size 1038584
diff --git a/binaries/linux/x86_64/libHDDLPlugin.so b/binaries/linux/x86_64/libHDDLPlugin.so
new file mode 100644
index 0000000000000000000000000000000000000000..3f06a55c844d4868566192b6e7372b65de81d910
--- /dev/null
+++ b/binaries/linux/x86_64/libHDDLPlugin.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c14f779a5ab4e0bf25a1f6a6ec4b23de340bdad56b884e38d2502ee14062825
+size 2721896
diff --git a/binaries/linux/x86_64/libHeteroPlugin.so b/binaries/linux/x86_64/libHeteroPlugin.so
new file mode 100644
index 0000000000000000000000000000000000000000..319a656ae0cf89b342f0401dfc8d982478252dfb
--- /dev/null
+++ b/binaries/linux/x86_64/libHeteroPlugin.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05d37595adcf7e831539c4a82fe1bf5c55959cf73527a783db2602eb084e07c0
+size 444424
diff --git a/binaries/linux/x86_64/libMKLDNNPlugin.so b/binaries/linux/x86_64/libMKLDNNPlugin.so
new file mode 100644
index 0000000000000000000000000000000000000000..4c105a7bf887304c4fa3f6fd83e3c2f01e8c5c78
--- /dev/null
+++ b/binaries/linux/x86_64/libMKLDNNPlugin.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a13f72420e514138a4f45a004ac0feb96b809865242451d92600a919c2add153
+size 31287984
diff --git a/binaries/linux/x86_64/libMultiDevicePlugin.so b/binaries/linux/x86_64/libMultiDevicePlugin.so
new file mode 100644
index 0000000000000000000000000000000000000000..0c5793246d6eb87ccf5f3ca20f7a65245708a1b1
--- /dev/null
+++ b/binaries/linux/x86_64/libMultiDevicePlugin.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef839796fd1645fc9d5f9e209fd0eb6eccfdcf320906c92a8643c5e0b0c7e396
+size 309328
diff --git a/binaries/linux/x86_64/libclDNNPlugin.so b/binaries/linux/x86_64/libclDNNPlugin.so
new file mode 100644
index 0000000000000000000000000000000000000000..7159eaf2f756213ebc75108fae62747b3c6af317
--- /dev/null
+++ b/binaries/linux/x86_64/libclDNNPlugin.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c12fce0df2a77bf129c53e019c237aac61f6e058505f12f346a204be561f7995
+size 9078576
diff --git a/binaries/linux/x86_64/libinference_engine.so b/binaries/linux/x86_64/libinference_engine.so
new file mode 100644
index 0000000000000000000000000000000000000000..de81e13cfe9ed85dfcf5d0f476c90af4326054ab
--- /dev/null
+++ b/binaries/linux/x86_64/libinference_engine.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af72d67e69a1b5e0d90a8d588a0b74211bb04398ebb95e2d8b4dea0bdef87fbb
+size 1681536
diff --git a/binaries/linux/x86_64/libinference_engine_legacy.so b/binaries/linux/x86_64/libinference_engine_legacy.so
new file mode 100644
index 0000000000000000000000000000000000000000..40552b0df5e9f3332747442bfe7f48f0e7ea50af
--- /dev/null
+++ b/binaries/linux/x86_64/libinference_engine_legacy.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08e8d18454b7e61ced453b731a5192adfbf96b73da578f94ad54432126ae4f20
+size 2447376
diff --git a/binaries/linux/x86_64/libinference_engine_lp_transformations.so b/binaries/linux/x86_64/libinference_engine_lp_transformations.so
new file mode 100644
index 0000000000000000000000000000000000000000..1ff9cadcdfe97ddc1f16626717b79f45b567b6c6
--- /dev/null
+++ b/binaries/linux/x86_64/libinference_engine_lp_transformations.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:493814189d2820fe2c34e3c6e2c0b6f1449c4e883001bc0a4b59b59930146e8b
+size 514040
diff --git a/binaries/linux/x86_64/libinference_engine_transformations.so b/binaries/linux/x86_64/libinference_engine_transformations.so
new file mode 100644
index 0000000000000000000000000000000000000000..a5c2708887c00eb3ca7d3d92c14dab93afa3c774
--- /dev/null
+++ b/binaries/linux/x86_64/libinference_engine_transformations.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aae52b29d8b16bb3fd194ba0c5781e86175fef7b6afeb2e51b9982eadf5a430f
+size 845816
diff --git a/binaries/linux/x86_64/libmyriadPlugin.so b/binaries/linux/x86_64/libmyriadPlugin.so
new file mode 100644
index 0000000000000000000000000000000000000000..02e05cf887c80c679d0da59419540c299fdc6d32
--- /dev/null
+++ b/binaries/linux/x86_64/libmyriadPlugin.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72142df2eff52876d6f749c724b2b8e2678c1b81235a84b83ce48de3792cef30
+size 2914600
diff --git a/binaries/linux/x86_64/libngraph.so b/binaries/linux/x86_64/libngraph.so
new file mode 100644
index 0000000000000000000000000000000000000000..69e050c858bc4643b200b712cc37a596fc5726ed
--- /dev/null
+++ b/binaries/linux/x86_64/libngraph.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79a5af7b8bf4e63c2cf38d4bc24a31980a042f6ae4de9fb832206be8f4916b60
+size 8565696
diff --git a/binaries/linux/x86_64/libtbb.so b/binaries/linux/x86_64/libtbb.so
new file mode 100644
index 0000000000000000000000000000000000000000..0edc6c980e114ba00afb159cdf1d8ec8984ec862
--- /dev/null
+++ b/binaries/linux/x86_64/libtbb.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37b507cef26b47b7d1c2beefc91ec1e30d61b0e94073bebca780ac6870a62878
+size 20
diff --git a/binaries/linux/x86_64/libtbb.so.2 b/binaries/linux/x86_64/libtbb.so.2
new file mode 100644
index 0000000000000000000000000000000000000000..dbdaa7622f97b2d480f9695de0f515ddc288a91b
--- /dev/null
+++ b/binaries/linux/x86_64/libtbb.so.2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13973e8505477a1b1b57f5d6f98f50d1f072682370dba04b04163800fd7cea2a
+size 415864
diff --git a/binaries/linux/x86_64/libtensorflow.so b/binaries/linux/x86_64/libtensorflow.so
new file mode 100755
index 0000000000000000000000000000000000000000..5094df02fad61fa6bef3bcffcb9673941a7136fb
--- /dev/null
+++ b/binaries/linux/x86_64/libtensorflow.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abe6b330b9d71faba282931e9ad5ec16d77e13d1daf595173084abd0de957f79
+size 382810991
diff --git a/binaries/linux/x86_64/libtensorflow.so.1 b/binaries/linux/x86_64/libtensorflow.so.1
new file mode 100755
index 0000000000000000000000000000000000000000..5094df02fad61fa6bef3bcffcb9673941a7136fb
--- /dev/null
+++ b/binaries/linux/x86_64/libtensorflow.so.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abe6b330b9d71faba282931e9ad5ec16d77e13d1daf595173084abd0de957f79
+size 382810991
diff --git a/binaries/linux/x86_64/libtensorflow.so.1.14.0 b/binaries/linux/x86_64/libtensorflow.so.1.14.0
new file mode 100755
index 0000000000000000000000000000000000000000..5094df02fad61fa6bef3bcffcb9673941a7136fb
--- /dev/null
+++ b/binaries/linux/x86_64/libtensorflow.so.1.14.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abe6b330b9d71faba282931e9ad5ec16d77e13d1daf595173084abd0de957f79
+size 382810991
diff --git a/binaries/linux/x86_64/libtensorflow_framework.so b/binaries/linux/x86_64/libtensorflow_framework.so
new file mode 100755
index 0000000000000000000000000000000000000000..d88f98c2a6813ef88b1305078bdd890180e373c6
--- /dev/null
+++ b/binaries/linux/x86_64/libtensorflow_framework.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:700759ba3ee33646b13984fa1aefda7dff59bed19fcf8782327afac24247b422
+size 35655504
diff --git a/binaries/linux/x86_64/libtensorflow_framework.so.1 b/binaries/linux/x86_64/libtensorflow_framework.so.1
new file mode 100755
index 0000000000000000000000000000000000000000..d88f98c2a6813ef88b1305078bdd890180e373c6
--- /dev/null
+++ b/binaries/linux/x86_64/libtensorflow_framework.so.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:700759ba3ee33646b13984fa1aefda7dff59bed19fcf8782327afac24247b422
+size 35655504
diff --git a/binaries/linux/x86_64/libtensorflow_framework.so.1.14.0 b/binaries/linux/x86_64/libtensorflow_framework.so.1.14.0
new file mode 100755
index 0000000000000000000000000000000000000000..d88f98c2a6813ef88b1305078bdd890180e373c6
--- /dev/null
+++ b/binaries/linux/x86_64/libtensorflow_framework.so.1.14.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:700759ba3ee33646b13984fa1aefda7dff59bed19fcf8782327afac24247b422
+size 35655504
diff --git a/binaries/linux/x86_64/libultimatePluginOpenVino.so b/binaries/linux/x86_64/libultimatePluginOpenVino.so
new file mode 100644
index 0000000000000000000000000000000000000000..47a8ba30de64795626a9b910466e9bd6ebd58500
--- /dev/null
+++ b/binaries/linux/x86_64/libultimatePluginOpenVino.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9af15ce43f0fb7b53359ddef719d837d6584c10082facc387a94251eda63b88d
+size 96984
diff --git a/binaries/linux/x86_64/libultimate_alpr-sdk.so b/binaries/linux/x86_64/libultimate_alpr-sdk.so
new file mode 100644
index 0000000000000000000000000000000000000000..0bf520ab7a7b55b840e440a8d5e1a632786fdf8c
--- /dev/null
+++ b/binaries/linux/x86_64/libultimate_alpr-sdk.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd2847c0fb7b94040de0bbaa590b783408d3a6afa8dc5bb275054d9642a13295
+size 4321104
diff --git a/binaries/linux/x86_64/plugins.xml b/binaries/linux/x86_64/plugins.xml
new file mode 100644
index 0000000000000000000000000000000000000000..1e8a56363844d27e65d1e06fba604f2f33a83429
--- /dev/null
+++ b/binaries/linux/x86_64/plugins.xml
@@ -0,0 +1,20 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/binaries/linux/x86_64/python_recognizer.sh b/binaries/linux/x86_64/python_recognizer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..88045e40cf4b18b850e2d63da78d49610e8b71fd
--- /dev/null
+++ b/binaries/linux/x86_64/python_recognizer.sh
@@ -0,0 +1,3 @@
+PYTHONPATH=$PYTHONPATH:.:../../../python \
+LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH \
+python3 ../../../samples/python/recognizer/recognizer.py --image ../../../assets/images/lic_us_1280x720.jpg --assets ../../../assets --klass_lpci_enabled True --klass_vcr_enabled True --klass_vmmr_enabled True
diff --git a/binaries/linux/x86_64/python_setup.sh b/binaries/linux/x86_64/python_setup.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5eef6869a80a4d302dc7ef26c133bd6af88fcfb7
--- /dev/null
+++ b/binaries/linux/x86_64/python_setup.sh
@@ -0,0 +1 @@
+python3 ../../../python/setup.py build_ext --inplace -v
\ No newline at end of file
diff --git a/binaries/linux/x86_64/recognizer b/binaries/linux/x86_64/recognizer
new file mode 100644
index 0000000000000000000000000000000000000000..42220166ef35ac2e5382b810ce0014ea219ea0ac
Binary files /dev/null and b/binaries/linux/x86_64/recognizer differ
diff --git a/binaries/linux/x86_64/runtimeKey b/binaries/linux/x86_64/runtimeKey
new file mode 100644
index 0000000000000000000000000000000000000000..25c771403490bd7f5e5005500ac07cc646469e33
Binary files /dev/null and b/binaries/linux/x86_64/runtimeKey differ
diff --git a/classificatsion_video_demo.py b/classificatsion_video_demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3a9a63e6cc4e101cc1c8c2671e94f2d57cc665b
--- /dev/null
+++ b/classificatsion_video_demo.py
@@ -0,0 +1,97 @@
+import cv2
+import os.path
+# from frame_do_video import images_to_video
+import numpy as np
+from ultralytics import YOLO
+from file_utils import project_dir
+
+
+def train():
+ """
+ Funksiya modelni train qiladi
+ data uyidagi formatda bo'lish kerak
+ # - splitted
+ # - train
+ # - good
+ # - problem
+ # - val
+ # - good
+ # - problem
+ """
+
+ data_joyi = 'traffic_laws/scripts/splitted/'
+ model = YOLO('yolov8n-cls.pt')
+ model.train(data=data_joyi, epochs=100, imgsz=224, batch=512, save_period=10, device='cuda:0', augment=True)
+ metrics = model.val()
+ print(metrics.top1) # top1 aniqligi
+
+def tekshirish(path2):
+ """
+ test qilish, model va rasmni berishimiz kerak
+ """
+ train_qilingan_model_joyi = os.path.join(
+ project_dir(),
+ "models",
+ "classification",
+ "tl-14",
+ "weights/best.pt"
+ )
+ test_rasm_joyi =(path2)
+
+ model_custom = YOLO("models/classification/tl-14/weights/best.pt")
+ natijalar = model_custom(test_rasm_joyi) # predict on an image
+ natija = natijalar[0].names[np.argmax(natijalar[0].probs.cpu().numpy().data)]
+ return (f"Label natija: {natija}")
+
+
+
+def process(video_path):
+
+ saqlash_path = video_path.split('/')[-1].split(".")[0]
+ problem_frame = 0
+ good_frame = 0
+ print("currnet path: ", os.getcwd())
+ if not os.path.exists(saqlash_path):
+ # Create a new directory because it does not exist
+ os.makedirs(saqlash_path)
+
+ if not os.path.exists(saqlash_path):
+ raise Exception("Sorry, no numbers below zero")
+ cap = cv2.VideoCapture(video_path)
+
+ fourcc = cv2.VideoWriter_fourcc(*'XVID')
+ # out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480))
+ out = cv2.VideoWriter(
+ saqlash_path, fourcc, 20.0, (int(cap.get(3)), int(cap.get(4)))
+ )
+
+ while(cap.isOpened()):
+ ret, frame = cap.read()
+ if ret==True:
+ #frame = cv2.flip(frame,1)
+ # print(tekshirish(frame))
+ if tekshirish(frame) == "Label natija: good":
+ font = cv2.FONT_HERSHEY_COMPLEX
+ cv2.putText(frame, 'good', (0, 100), font, 2, (255, 255, 255), 3)
+ good_frame += 1
+ elif tekshirish(frame) == "Label natija: problem":
+ font = cv2.FONT_HERSHEY_COMPLEX
+ cv2.putText(frame, 'problem', (0, 100), font, 2, (255, 255, 255), 3)
+ # out.write(frame)
+ cv2.imwrite(saqlash_path + "/%#05d.jpg" % problem_frame, frame)
+ problem_frame += 1
+ # cv2.imshow('frame' ,frame)
+
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+ else:
+ break
+ cap.release()
+
+ out.release()
+
+ cv2.destroyAllWindows()
+
+ # images_to_video(saqlash_path, video_name = saqlash_path+'_problem.mp4', fps = 24)
+
+ return problem_frame,good_frame,saqlash_path
diff --git a/file_utils.py b/file_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..643c1a9f6bde5e376fe9da4e2c4b62034f7a8518
--- /dev/null
+++ b/file_utils.py
@@ -0,0 +1,12 @@
+import os
+
+
+def project_dir():
+ """
+ Returns path to the project root
+ Returns
+ -------
+ Path
+ Return path to the project root
+ """
+ return os.path.dirname(os.path.dirname(__file__))
\ No newline at end of file
diff --git a/models/__init__.py b/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/models/classification/tl-14/args.yaml b/models/classification/tl-14/args.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..59d54766afd2da5caa6d27f72994b1e9927a794e
--- /dev/null
+++ b/models/classification/tl-14/args.yaml
@@ -0,0 +1,95 @@
+task: classify
+mode: train
+model: yolov8n-cls.pt
+data: /home/cradle/work/git/traffic_laws/scripts/splitted/
+epochs: 100
+patience: 50
+batch: 512
+imgsz: 224
+save: true
+save_period: 10
+cache: false
+device: cuda:0
+workers: 8
+project: null
+name: null
+exist_ok: false
+pretrained: false
+optimizer: SGD
+verbose: true
+seed: 0
+deterministic: true
+single_cls: false
+rect: false
+cos_lr: false
+close_mosaic: 0
+resume: false
+amp: true
+overlap_mask: true
+mask_ratio: 4
+dropout: 0.0
+val: true
+split: val
+save_json: false
+save_hybrid: false
+conf: null
+iou: 0.7
+max_det: 300
+half: false
+dnn: false
+plots: true
+source: null
+show: false
+save_txt: false
+save_conf: false
+save_crop: false
+show_labels: true
+show_conf: true
+vid_stride: 1
+line_width: null
+visualize: false
+augment: true
+agnostic_nms: false
+classes: null
+retina_masks: false
+boxes: true
+format: torchscript
+keras: false
+optimize: false
+int8: false
+dynamic: false
+simplify: false
+opset: null
+workspace: 4
+nms: false
+lr0: 0.01
+lrf: 0.01
+momentum: 0.937
+weight_decay: 0.0005
+warmup_epochs: 3.0
+warmup_momentum: 0.8
+warmup_bias_lr: 0.1
+box: 7.5
+cls: 0.5
+dfl: 1.5
+pose: 12.0
+kobj: 1.0
+label_smoothing: 0.0
+nbs: 64
+hsv_h: 0.015
+hsv_s: 0.7
+hsv_v: 0.4
+degrees: 0.0
+translate: 0.1
+scale: 0.5
+shear: 0.0
+perspective: 0.0
+flipud: 0.0
+fliplr: 0.5
+mosaic: 1.0
+mixup: 0.0
+copy_paste: 0.0
+cfg: null
+v5loader: false
+tracker: botsort.yaml
+save_dir: /home/cradle/work/git/traffic_laws/runs/classify/train14
diff --git a/models/classification/tl-14/confusion_matrix.png b/models/classification/tl-14/confusion_matrix.png
new file mode 100644
index 0000000000000000000000000000000000000000..a57d13c31da0e7495d6ba9ae77987349c50a59a2
Binary files /dev/null and b/models/classification/tl-14/confusion_matrix.png differ
diff --git a/models/classification/tl-14/confusion_matrix_normalized.png b/models/classification/tl-14/confusion_matrix_normalized.png
new file mode 100644
index 0000000000000000000000000000000000000000..bace79980a29044a395bd0d9032598a118be5b6b
Binary files /dev/null and b/models/classification/tl-14/confusion_matrix_normalized.png differ
diff --git a/models/classification/tl-14/results.csv b/models/classification/tl-14/results.csv
new file mode 100644
index 0000000000000000000000000000000000000000..4821e78bf20b5dde46ce440f3937395ed01cfbfb
--- /dev/null
+++ b/models/classification/tl-14/results.csv
@@ -0,0 +1,101 @@
+ epoch, train/loss, metrics/accuracy_top1, metrics/accuracy_top5, val/loss, lr/pg0, lr/pg1, lr/pg2
+ 0, 5.5416, 0.50669, 1, 10.956, 0.070159, 0.0033157, 0.0033157
+ 1, 5.5047, 0.55411, 1, 10.75, 0.040093, 0.0065832, 0.0065832
+ 2, 5.4222, 0.58461, 1, 10.639, 0.0099611, 0.0097847, 0.0097847
+ 3, 5.2532, 0.69717, 1, 9.8313, 0.009703, 0.009703, 0.009703
+ 4, 4.958, 0.73121, 1, 9.2953, 0.009703, 0.009703, 0.009703
+ 5, 4.7642, 0.68044, 1, 9.7063, 0.009604, 0.009604, 0.009604
+ 6, 4.6205, 0.75364, 1, 9.0844, 0.009505, 0.009505, 0.009505
+ 7, 4.5059, 0.72334, 1, 9.3578, 0.009406, 0.009406, 0.009406
+ 8, 4.4169, 0.79418, 1, 8.3141, 0.009307, 0.009307, 0.009307
+ 9, 4.3508, 0.79378, 1, 8.7203, 0.009208, 0.009208, 0.009208
+ 10, 4.2707, 0.80067, 1, 8.4641, 0.009109, 0.009109, 0.009109
+ 11, 4.2023, 0.805, 1, 8.1141, 0.00901, 0.00901, 0.00901
+ 12, 4.1543, 0.82468, 1, 8.2203, 0.008911, 0.008911, 0.008911
+ 13, 4.1146, 0.78867, 1, 8.4797, 0.008812, 0.008812, 0.008812
+ 14, 4.0376, 0.76348, 1, 9.0609, 0.008713, 0.008713, 0.008713
+ 15, 3.9988, 0.84947, 1, 7.6094, 0.008614, 0.008614, 0.008614
+ 16, 3.9219, 0.84632, 1, 7.8945, 0.008515, 0.008515, 0.008515
+ 17, 3.8627, 0.85557, 1, 7.4461, 0.008416, 0.008416, 0.008416
+ 18, 3.8134, 0.82113, 1, 8.1742, 0.008317, 0.008317, 0.008317
+ 19, 3.7674, 0.86364, 1, 7.3414, 0.008218, 0.008218, 0.008218
+ 20, 3.6872, 0.86698, 1, 7.3883, 0.008119, 0.008119, 0.008119
+ 21, 3.6458, 0.88508, 1, 7.0422, 0.00802, 0.00802, 0.00802
+ 22, 3.6013, 0.87682, 1, 7.0398, 0.007921, 0.007921, 0.007921
+ 23, 3.5386, 0.88843, 1, 7.0906, 0.007822, 0.007822, 0.007822
+ 24, 3.5021, 0.90929, 1, 6.7047, 0.007723, 0.007723, 0.007723
+ 25, 3.4811, 0.89709, 1, 6.9242, 0.007624, 0.007624, 0.007624
+ 26, 3.4568, 0.91106, 1, 6.7039, 0.007525, 0.007525, 0.007525
+ 27, 3.4255, 0.90712, 1, 6.6664, 0.007426, 0.007426, 0.007426
+ 28, 3.374, 0.90338, 1, 6.8336, 0.007327, 0.007327, 0.007327
+ 29, 3.3352, 0.91637, 1, 6.5492, 0.007228, 0.007228, 0.007228
+ 30, 3.3159, 0.91558, 1, 6.5406, 0.007129, 0.007129, 0.007129
+ 31, 3.2674, 0.92247, 1, 6.4414, 0.00703, 0.00703, 0.00703
+ 32, 3.2484, 0.92562, 1, 6.3805, 0.006931, 0.006931, 0.006931
+ 33, 3.2061, 0.9205, 1, 6.3875, 0.006832, 0.006832, 0.006832
+ 34, 3.1725, 0.92995, 1, 6.325, 0.006733, 0.006733, 0.006733
+ 35, 3.1673, 0.92641, 1, 6.3453, 0.006634, 0.006634, 0.006634
+ 36, 3.123, 0.92601, 1, 6.2336, 0.006535, 0.006535, 0.006535
+ 37, 3.1132, 0.92778, 1, 6.2602, 0.006436, 0.006436, 0.006436
+ 38, 3.0721, 0.93152, 1, 6.207, 0.006337, 0.006337, 0.006337
+ 39, 3.0491, 0.93664, 1, 6.1188, 0.006238, 0.006238, 0.006238
+ 40, 3.0232, 0.94097, 1, 6.0781, 0.006139, 0.006139, 0.006139
+ 41, 2.9917, 0.939, 1, 6.0617, 0.00604, 0.00604, 0.00604
+ 42, 2.9734, 0.94116, 1, 6.0453, 0.005941, 0.005941, 0.005941
+ 43, 2.9534, 0.94156, 1, 6.0016, 0.005842, 0.005842, 0.005842
+ 44, 2.907, 0.93959, 1, 5.9914, 0.005743, 0.005743, 0.005743
+ 45, 2.8945, 0.94353, 1, 5.9461, 0.005644, 0.005644, 0.005644
+ 46, 2.873, 0.94648, 1, 5.9039, 0.005545, 0.005545, 0.005545
+ 47, 2.8462, 0.94884, 1, 5.9281, 0.005446, 0.005446, 0.005446
+ 48, 2.8267, 0.94923, 1, 5.907, 0.005347, 0.005347, 0.005347
+ 49, 2.8004, 0.94667, 1, 5.8906, 0.005248, 0.005248, 0.005248
+ 50, 2.7884, 0.95081, 1, 5.8547, 0.005149, 0.005149, 0.005149
+ 51, 2.7668, 0.95356, 1, 5.818, 0.00505, 0.00505, 0.00505
+ 52, 2.7351, 0.95651, 1, 5.7719, 0.004951, 0.004951, 0.004951
+ 53, 2.7076, 0.95514, 1, 5.7742, 0.004852, 0.004852, 0.004852
+ 54, 2.697, 0.95553, 1, 5.7711, 0.004753, 0.004753, 0.004753
+ 55, 2.6597, 0.9571, 1, 5.7313, 0.004654, 0.004654, 0.004654
+ 56, 2.6733, 0.95632, 1, 5.7188, 0.004555, 0.004555, 0.004555
+ 57, 2.6115, 0.95553, 1, 5.7164, 0.004456, 0.004456, 0.004456
+ 58, 2.6045, 0.95907, 1, 5.693, 0.004357, 0.004357, 0.004357
+ 59, 2.5843, 0.95828, 1, 5.6852, 0.004258, 0.004258, 0.004258
+ 60, 2.5937, 0.95966, 1, 5.6914, 0.004159, 0.004159, 0.004159
+ 61, 2.5564, 0.96124, 1, 5.6688, 0.00406, 0.00406, 0.00406
+ 62, 2.5377, 0.96242, 1, 5.6602, 0.003961, 0.003961, 0.003961
+ 63, 2.5002, 0.96379, 1, 5.6375, 0.003862, 0.003862, 0.003862
+ 64, 2.4955, 0.96478, 1, 5.6242, 0.003763, 0.003763, 0.003763
+ 65, 2.4717, 0.9636, 1, 5.6102, 0.003664, 0.003664, 0.003664
+ 66, 2.4411, 0.96556, 1, 5.5938, 0.003565, 0.003565, 0.003565
+ 67, 2.4351, 0.96556, 1, 5.5859, 0.003466, 0.003466, 0.003466
+ 68, 2.4095, 0.96497, 1, 5.5828, 0.003367, 0.003367, 0.003367
+ 69, 2.3852, 0.96616, 1, 5.5734, 0.003268, 0.003268, 0.003268
+ 70, 2.373, 0.96596, 1, 5.5672, 0.003169, 0.003169, 0.003169
+ 71, 2.329, 0.96576, 1, 5.5625, 0.00307, 0.00307, 0.00307
+ 72, 2.3093, 0.96635, 1, 5.5516, 0.002971, 0.002971, 0.002971
+ 73, 2.294, 0.96655, 1, 5.5484, 0.002872, 0.002872, 0.002872
+ 74, 2.2737, 0.96675, 1, 5.5414, 0.002773, 0.002773, 0.002773
+ 75, 2.2477, 0.96773, 1, 5.5352, 0.002674, 0.002674, 0.002674
+ 76, 2.2475, 0.96891, 1, 5.5266, 0.002575, 0.002575, 0.002575
+ 77, 2.2049, 0.9697, 1, 5.5203, 0.002476, 0.002476, 0.002476
+ 78, 2.177, 0.9695, 1, 5.5141, 0.002377, 0.002377, 0.002377
+ 79, 2.1432, 0.97029, 1, 5.5055, 0.002278, 0.002278, 0.002278
+ 80, 2.1023, 0.97068, 1, 5.4969, 0.002179, 0.002179, 0.002179
+ 81, 2.0898, 0.97127, 1, 5.4898, 0.00208, 0.00208, 0.00208
+ 82, 2.0706, 0.97127, 1, 5.4828, 0.001981, 0.001981, 0.001981
+ 83, 2.0451, 0.97127, 1, 5.4773, 0.001882, 0.001882, 0.001882
+ 84, 2.0271, 0.97166, 1, 5.4695, 0.001783, 0.001783, 0.001783
+ 85, 1.9841, 0.97245, 1, 5.4648, 0.001684, 0.001684, 0.001684
+ 86, 1.9627, 0.97245, 1, 5.4586, 0.001585, 0.001585, 0.001585
+ 87, 1.9219, 0.97383, 1, 5.4523, 0.001486, 0.001486, 0.001486
+ 88, 1.8855, 0.97442, 1, 5.4461, 0.001387, 0.001387, 0.001387
+ 89, 1.8957, 0.97442, 1, 5.4422, 0.001288, 0.001288, 0.001288
+ 90, 1.8153, 0.97422, 1, 5.4367, 0.001189, 0.001189, 0.001189
+ 91, 1.8152, 0.97422, 1, 5.4352, 0.00109, 0.00109, 0.00109
+ 92, 1.7587, 0.97442, 1, 5.4289, 0.000991, 0.000991, 0.000991
+ 93, 1.735, 0.97403, 1, 5.4266, 0.000892, 0.000892, 0.000892
+ 94, 1.7055, 0.97501, 1, 5.4242, 0.000793, 0.000793, 0.000793
+ 95, 1.6447, 0.9754, 1, 5.4195, 0.000694, 0.000694, 0.000694
+ 96, 1.6172, 0.9754, 1, 5.4156, 0.000595, 0.000595, 0.000595
+ 97, 1.582, 0.9756, 1, 5.4117, 0.000496, 0.000496, 0.000496
+ 98, 1.517, 0.97717, 1, 5.4078, 0.000397, 0.000397, 0.000397
+ 99, 1.4948, 0.97717, 1, 5.4047, 0.000298, 0.000298, 0.000298
diff --git a/models/classification/tl-14/results.png b/models/classification/tl-14/results.png
new file mode 100644
index 0000000000000000000000000000000000000000..09992c0a8d90b63fb6a111fbbe77c490b6257f93
Binary files /dev/null and b/models/classification/tl-14/results.png differ
diff --git a/models/classification/tl-14/train_batch0.jpg b/models/classification/tl-14/train_batch0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..27994742b7cf64309f8a1c03f9818799bddd9061
Binary files /dev/null and b/models/classification/tl-14/train_batch0.jpg differ
diff --git a/models/classification/tl-14/train_batch1.jpg b/models/classification/tl-14/train_batch1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a7a5fa04d4325eef2cdf2a13fda47a32748825c9
Binary files /dev/null and b/models/classification/tl-14/train_batch1.jpg differ
diff --git a/models/classification/tl-14/train_batch2.jpg b/models/classification/tl-14/train_batch2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ea92868b48564d2306d016641af1e732ad41c33b
Binary files /dev/null and b/models/classification/tl-14/train_batch2.jpg differ
diff --git a/models/classification/tl-14/val_batch0_labels.jpg b/models/classification/tl-14/val_batch0_labels.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8e18cb3502983b14f4300654f700bcbc33e0de34
Binary files /dev/null and b/models/classification/tl-14/val_batch0_labels.jpg differ
diff --git a/models/classification/tl-14/val_batch0_pred.jpg b/models/classification/tl-14/val_batch0_pred.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8e18cb3502983b14f4300654f700bcbc33e0de34
Binary files /dev/null and b/models/classification/tl-14/val_batch0_pred.jpg differ
diff --git a/models/classification/tl-14/val_batch1_labels.jpg b/models/classification/tl-14/val_batch1_labels.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..52693599d75760825db4989a8ddb53c96c07ab85
Binary files /dev/null and b/models/classification/tl-14/val_batch1_labels.jpg differ
diff --git a/models/classification/tl-14/val_batch1_pred.jpg b/models/classification/tl-14/val_batch1_pred.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..52693599d75760825db4989a8ddb53c96c07ab85
Binary files /dev/null and b/models/classification/tl-14/val_batch1_pred.jpg differ
diff --git a/models/classification/tl-14/val_batch2_labels.jpg b/models/classification/tl-14/val_batch2_labels.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..68af7ef02d9cea90008a66a470c33b246bf09d45
Binary files /dev/null and b/models/classification/tl-14/val_batch2_labels.jpg differ
diff --git a/models/classification/tl-14/val_batch2_pred.jpg b/models/classification/tl-14/val_batch2_pred.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..68af7ef02d9cea90008a66a470c33b246bf09d45
Binary files /dev/null and b/models/classification/tl-14/val_batch2_pred.jpg differ
diff --git a/models/classification/tl-14/weights/best.pt b/models/classification/tl-14/weights/best.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d80a3b1abde907f3f9afec06b46709b88a580d17
--- /dev/null
+++ b/models/classification/tl-14/weights/best.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:059dcca63de6895eeaeb6530c9ace7f45c7fe9bb06757f55f40a30112799957f
+size 2959200
diff --git a/models/common.py b/models/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa8ae674eb472421050c715d2bdd27f12bb3c1e0
--- /dev/null
+++ b/models/common.py
@@ -0,0 +1,870 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Common modules
+"""
+
+import ast
+import contextlib
+import json
+import math
+import platform
+import warnings
+import zipfile
+from collections import OrderedDict, namedtuple
+from copy import copy
+from pathlib import Path
+from urllib.parse import urlparse
+
+import cv2
+import numpy as np
+import pandas as pd
+import requests
+import torch
+import torch.nn as nn
+from PIL import Image
+from torch.cuda import amp
+
+from utils import TryExcept
+from utils.dataloaders import exif_transpose, letterbox
+from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
+ increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
+ xyxy2xywh, yaml_load)
+from utils.plots import Annotator, colors, save_one_box
+from utils.torch_utils import copy_attr, smart_inference_mode
+
+
def autopad(k, p=None, d=1):
    """Return the padding that produces 'same'-shaped conv outputs.

    k: kernel size (int or sequence), p: explicit padding (returned
    unchanged when given), d: dilation factor.
    """
    # Grow the kernel to its effective (dilated) size first.
    if d > 1:
        if isinstance(k, int):
            k = d * (k - 1) + 1
        else:
            k = [d * (x - 1) + 1 for x in k]
    # Default padding is half the (effective) kernel size.
    if p is None:
        if isinstance(k, int):
            p = k // 2
        else:
            p = [x // 2 for x in k]
    return p
+
+
class Conv(nn.Module):
    """Standard Conv2d + BatchNorm2d + activation block.

    Args mirror nn.Conv2d: ch_in, ch_out, kernel, stride, padding, groups,
    dilation. `act` selects the activation: True -> default SiLU, an
    nn.Module -> used as-is, anything else -> Identity.
    """
    default_act = nn.SiLU()  # default activation

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        # Resolve the activation once at construction time.
        if act is True:
            self.act = self.default_act
        elif isinstance(act, nn.Module):
            self.act = act
        else:
            self.act = nn.Identity()

    def forward(self, x):
        # conv -> batch-norm -> activation
        y = self.conv(x)
        y = self.bn(y)
        return self.act(y)

    def forward_fuse(self, x):
        # Used after conv+bn fusion: batch-norm is already folded into conv.
        return self.act(self.conv(x))
+
+
class DWConv(Conv):
    # Depth-wise convolution
    # Groups = gcd(c1, c2), so each group convolves a minimal channel slice;
    # when c1 == c2 this is a true per-channel (depth-wise) convolution.
    def __init__(self, c1, c2, k=1, s=1, d=1, act=True):  # ch_in, ch_out, kernel, stride, dilation, activation
        super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
+
+
class DWConvTranspose2d(nn.ConvTranspose2d):
    # Depth-wise transpose convolution
    # Groups = gcd(c1, c2): a true depth-wise transpose conv when c1 == c2.
    def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):  # ch_in, ch_out, kernel, stride, padding, padding_out
        super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
+
+
+class TransformerLayer(nn.Module):
+ # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
+ def __init__(self, c, num_heads):
+ super().__init__()
+ self.q = nn.Linear(c, c, bias=False)
+ self.k = nn.Linear(c, c, bias=False)
+ self.v = nn.Linear(c, c, bias=False)
+ self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
+ self.fc1 = nn.Linear(c, c, bias=False)
+ self.fc2 = nn.Linear(c, c, bias=False)
+
+ def forward(self, x):
+ x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
+ x = self.fc2(self.fc1(x)) + x
+ return x
+
+
class TransformerBlock(nn.Module):
    """Vision-transformer block (https://arxiv.org/abs/2010.11929): optional
    channel-matching Conv, a learnable position embedding (nn.Linear), and
    num_layers stacked TransformerLayers."""

    def __init__(self, c1, c2, num_heads, num_layers):
        super().__init__()
        # Only insert a Conv when the channel count has to change.
        self.conv = Conv(c1, c2) if c1 != c2 else None
        self.linear = nn.Linear(c2, c2)  # learnable position embedding
        layers = [TransformerLayer(c2, num_heads) for _ in range(num_layers)]
        self.tr = nn.Sequential(*layers)
        self.c2 = c2

    def forward(self, x):
        if self.conv is not None:
            x = self.conv(x)
        b, _, w, h = x.shape
        # Flatten the spatial grid into a sequence: (b, c, w*h) -> (w*h, b, c).
        p = x.flatten(2).permute(2, 0, 1)
        # Add the position embedding, run the stack, restore NCHW layout.
        out = self.tr(p + self.linear(p))
        return out.permute(1, 2, 0).reshape(b, self.c2, w, h)
+
+
class Bottleneck(nn.Module):
    """Standard bottleneck: 1x1 reduce -> 3x3 conv, with an optional
    residual add when shortcut is requested and channel counts match."""

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        # Residual connection is only valid when the shapes line up.
        self.add = shortcut and c1 == c2

    def forward(self, x):
        y = self.cv2(self.cv1(x))
        return x + y if self.add else y
+
+
class BottleneckCSP(nn.Module):
    """CSP bottleneck (https://github.com/WongKinYiu/CrossStagePartialNetworks).

    Splits the input into a bottleneck branch and a bypass branch, then
    concatenates, batch-normalizes and fuses them with a 1x1 conv.
    """

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = nn.Conv2d(c1, hidden, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(hidden, hidden, 1, 1, bias=False)
        self.cv4 = Conv(2 * hidden, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * hidden)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        blocks = [Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)]
        self.m = nn.Sequential(*blocks)

    def forward(self, x):
        branch_a = self.cv3(self.m(self.cv1(x)))  # bottleneck branch
        branch_b = self.cv2(x)  # bypass branch
        merged = torch.cat((branch_a, branch_b), 1)
        return self.cv4(self.act(self.bn(merged)))
+
+
class CrossConv(nn.Module):
    """Cross-convolution downsample: a 1xk conv followed by a kx1 conv,
    with an optional residual add when channels match."""

    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, (1, k), (1, s))
        self.cv2 = Conv(hidden, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        y = self.cv2(self.cv1(x))
        return x + y if self.add else y
+
+
class C3(nn.Module):
    """CSP bottleneck with three convolutions: two parallel 1x1 reductions,
    a bottleneck stack on one branch, then a fusing 1x1 conv."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(c1, hidden, 1, 1)
        self.cv3 = Conv(2 * hidden, c2, 1)  # optional act=FReLU(c2)
        blocks = [Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)]
        self.m = nn.Sequential(*blocks)

    def forward(self, x):
        branch_a = self.m(self.cv1(x))
        branch_b = self.cv2(x)
        return self.cv3(torch.cat((branch_a, branch_b), 1))
+
+
class C3x(C3):
    # C3 module with cross-convolutions
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        # Replace the parent's Bottleneck stack with CrossConv blocks.
        self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))
+
+
class C3TR(C3):
    # C3 module with TransformerBlock()
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        # Replace the parent's Bottleneck stack with a 4-head transformer block.
        self.m = TransformerBlock(c_, c_, 4, n)
+
+
class C3SPP(C3):
    # C3 module with SPP()
    def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        # Replace the parent's Bottleneck stack with a spatial-pyramid-pooling layer.
        self.m = SPP(c_, c_, k)
+
+
class C3Ghost(C3):
    # C3 module with GhostBottleneck()
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)  # hidden channels
        # Replace the parent's Bottleneck stack with GhostBottleneck blocks.
        self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
+
+
class SPP(nn.Module):
    """Spatial Pyramid Pooling layer (https://arxiv.org/abs/1406.4729).

    Max-pools the reduced feature map at several kernel sizes and fuses the
    concatenated results with a 1x1 conv.
    """

    def __init__(self, c1, c2, k=(5, 9, 13)):
        super().__init__()
        hidden = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

    def forward(self, x):
        x = self.cv1(x)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
            # Original input plus one max-pooled copy per kernel size.
            outs = [x]
            for pool in self.m:
                outs.append(pool(x))
            return self.cv2(torch.cat(outs, 1))
+
+
class SPPF(nn.Module):
    """Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher.

    Three chained k-size max-pools reproduce SPP(k=(5, 9, 13)) at lower cost.
    """

    def __init__(self, c1, c2, k=5):  # equivalent to SPP(k=(5, 9, 13))
        super().__init__()
        hidden = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden * 4, c2, 1, 1)
        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

    def forward(self, x):
        x = self.cv1(x)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
            # Chain three pools; each re-pool widens the receptive field.
            pooled = [x]
            for _ in range(3):
                pooled.append(self.m(pooled[-1]))
            return self.cv2(torch.cat(pooled, 1))
+
+
class Focus(nn.Module):
    """Focus width-height information into channel space: sample every other
    pixel at four phase offsets, stack on the channel axis, then convolve."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
        # self.contract = Contract(gain=2)

    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        # Four interleaved sub-grids of the input, concatenated channel-wise.
        patches = (x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2])
        return self.conv(torch.cat(patches, 1))
        # return self.conv(self.contract(x))
+
+
class GhostConv(nn.Module):
    """Ghost convolution (https://github.com/huawei-noah/ghostnet): compute
    half the output channels with a normal conv, then generate the other half
    cheaply with a depth-wise 5x5 conv over the first half."""

    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
        super().__init__()
        hidden = c2 // 2  # hidden channels
        self.cv1 = Conv(c1, hidden, k, s, None, g, act=act)
        self.cv2 = Conv(hidden, hidden, 5, 1, None, hidden, act=act)

    def forward(self, x):
        primary = self.cv1(x)
        return torch.cat((primary, self.cv2(primary)), 1)
+
+
class GhostBottleneck(nn.Module):
    """Ghost bottleneck (https://github.com/huawei-noah/ghostnet):
    pointwise GhostConv -> optional depth-wise downsample -> linear GhostConv,
    plus a shape-matching shortcut path."""

    def __init__(self, c1, c2, k=3, s=1):  # ch_in, ch_out, kernel, stride
        super().__init__()
        hidden = c2 // 2
        # The depth-wise stage only participates when stride 2 (downsampling).
        dw = DWConv(hidden, hidden, k, s, act=False) if s == 2 else nn.Identity()
        self.conv = nn.Sequential(
            GhostConv(c1, hidden, 1, 1),  # pw
            dw,  # dw
            GhostConv(hidden, c2, 1, 1, act=False))  # pw-linear
        if s == 2:
            self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False))
        else:
            self.shortcut = nn.Identity()

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)
+
+
class Contract(nn.Module):
    """Contract spatial dims into channels, e.g. x(1,64,80,80) -> x(1,256,40,40)."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        b, c, h, w = x.size()  # assumes h and w are divisible by gain
        g = self.gain
        # Split each spatial dim into (dim // g, g), move the two g-factors
        # next to the channel axis, then fold them into it.
        x = x.view(b, c, h // g, g, w // g, g)
        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
        return x.view(b, c * g * g, h // g, w // g)
+
+
class Expand(nn.Module):
    """Expand channels into spatial dims, e.g. x(1,64,80,80) -> x(1,16,160,160)."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        b, c, h, w = x.size()  # assumes c is divisible by gain ** 2
        g = self.gain
        # Peel two g-factors off the channel axis and interleave them into
        # the spatial dims (inverse of Contract).
        x = x.view(b, g, g, c // g ** 2, h, w)
        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()
        return x.view(b, c // g ** 2, h * g, w * g)
+
+
class Concat(nn.Module):
    """Concatenate a list of tensors along a fixed dimension."""

    def __init__(self, dimension=1):
        super().__init__()
        self.d = dimension  # concatenation axis

    def forward(self, x):
        # x is a list/tuple of tensors whose shapes match off-axis.
        return torch.cat(x, self.d)
+
+
class DetectMultiBackend(nn.Module):
    # YOLOv5 MultiBackend class for python inference on various backends
    def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):
        """Load `weights` on whichever backend its suffix/URL implies.

        Exactly one backend branch below runs; each branch defines the local
        variables that its matching branch in forward() later reads, and every
        local is attached to `self` at the end via self.__dict__.update(locals()).
        """
        # Usage:
        # PyTorch: weights = *.pt
        # TorchScript: *.torchscript
        # ONNX Runtime: *.onnx
        # ONNX OpenCV DNN: *.onnx --dnn
        # OpenVINO: *_openvino_model
        # CoreML: *.mlmodel
        # TensorRT: *.engine
        # TensorFlow SavedModel: *_saved_model
        # TensorFlow GraphDef: *.pb
        # TensorFlow Lite: *.tflite
        # TensorFlow Edge TPU: *_edgetpu.tflite
        # PaddlePaddle: *_paddle_model
        from models.experimental import attempt_download, attempt_load  # scoped to avoid circular import

        super().__init__()
        w = str(weights[0] if isinstance(weights, list) else weights)
        pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
        fp16 &= pt or jit or onnx or engine  # FP16
        nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCWH)
        stride = 32  # default stride
        cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
        if not (pt or triton):
            w = attempt_download(w)  # download if not local

        if pt:  # PyTorch
            model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)
            stride = max(int(model.stride.max()), 32)  # model stride
            names = model.module.names if hasattr(model, 'module') else model.names  # get class names
            model.half() if fp16 else model.float()
            self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
        elif jit:  # TorchScript
            LOGGER.info(f'Loading {w} for TorchScript inference...')
            extra_files = {'config.txt': ''}  # model metadata
            model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
            model.half() if fp16 else model.float()
            if extra_files['config.txt']:  # load metadata dict
                d = json.loads(extra_files['config.txt'],
                               object_hook=lambda d: {int(k) if k.isdigit() else k: v
                                                      for k, v in d.items()})
                stride, names = int(d['stride']), d['names']
        elif dnn:  # ONNX OpenCV DNN
            LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
            check_requirements('opencv-python>=4.5.4')
            net = cv2.dnn.readNetFromONNX(w)
        elif onnx:  # ONNX Runtime
            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
            import onnxruntime
            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
            session = onnxruntime.InferenceSession(w, providers=providers)
            output_names = [x.name for x in session.get_outputs()]
            meta = session.get_modelmeta().custom_metadata_map  # metadata
            if 'stride' in meta:
                # NOTE(review): eval() on checkpoint metadata — trusted models only;
                # ast.literal_eval would be safer here.
                stride, names = int(meta['stride']), eval(meta['names'])
        elif xml:  # OpenVINO
            LOGGER.info(f'Loading {w} for OpenVINO inference...')
            check_requirements('openvino')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
            from openvino.runtime import Core, Layout, get_batch
            ie = Core()
            if not Path(w).is_file():  # if not *.xml
                w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
            if network.get_parameters()[0].get_layout().empty:
                network.get_parameters()[0].set_layout(Layout('NCHW'))
            batch_dim = get_batch(network)
            if batch_dim.is_static:
                batch_size = batch_dim.get_length()
            executable_network = ie.compile_model(network, device_name='CPU')  # device_name="MYRIAD" for Intel NCS2
            stride, names = self._load_metadata(Path(w).with_suffix('.yaml'))  # load metadata
        elif engine:  # TensorRT
            LOGGER.info(f'Loading {w} for TensorRT inference...')
            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
            check_version(trt.__version__, '7.0.0', hard=True)  # require tensorrt>=7.0.0
            if device.type == 'cpu':
                device = torch.device('cuda:0')
            Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
            logger = trt.Logger(trt.Logger.INFO)
            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
                model = runtime.deserialize_cuda_engine(f.read())
            context = model.create_execution_context()
            bindings = OrderedDict()
            output_names = []
            fp16 = False  # default updated below
            dynamic = False
            # Pre-allocate a device tensor for every engine binding so forward()
            # can execute with raw pointers via execute_v2().
            for i in range(model.num_bindings):
                name = model.get_binding_name(i)
                dtype = trt.nptype(model.get_binding_dtype(i))
                if model.binding_is_input(i):
                    if -1 in tuple(model.get_binding_shape(i)):  # dynamic
                        dynamic = True
                        context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))
                    if dtype == np.float16:
                        fp16 = True
                else:  # output
                    output_names.append(name)
                shape = tuple(context.get_binding_shape(i))
                im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
                bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
            binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
            batch_size = bindings['images'].shape[0]  # if dynamic, this is instead max batch size
        elif coreml:  # CoreML
            LOGGER.info(f'Loading {w} for CoreML inference...')
            import coremltools as ct
            model = ct.models.MLModel(w)
        elif saved_model:  # TF SavedModel
            LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
            import tensorflow as tf
            keras = False  # assume TF1 saved_model
            model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
        elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
            LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
            import tensorflow as tf

            def wrap_frozen_graph(gd, inputs, outputs):
                # Wrap a TF1 frozen GraphDef as a callable TF2 ConcreteFunction.
                x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), [])  # wrapped
                ge = x.graph.as_graph_element
                return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))

            def gd_outputs(gd):
                # Graph outputs = nodes that feed no other node (excluding NoOp).
                name_list, input_list = [], []
                for node in gd.node:  # tensorflow.core.framework.node_def_pb2.NodeDef
                    name_list.append(node.name)
                    input_list.extend(node.input)
                return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))

            gd = tf.Graph().as_graph_def()  # TF GraphDef
            with open(w, 'rb') as f:
                gd.ParseFromString(f.read())
            frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))
        elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
            try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
                from tflite_runtime.interpreter import Interpreter, load_delegate
            except ImportError:
                import tensorflow as tf
                Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,
            if edgetpu:  # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
                LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
                delegate = {
                    'Linux': 'libedgetpu.so.1',
                    'Darwin': 'libedgetpu.1.dylib',
                    'Windows': 'edgetpu.dll'}[platform.system()]
                interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
            else:  # TFLite
                LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
                interpreter = Interpreter(model_path=w)  # load TFLite model
            interpreter.allocate_tensors()  # allocate
            input_details = interpreter.get_input_details()  # inputs
            output_details = interpreter.get_output_details()  # outputs
            # load metadata (a .tflite file may double as a zip with a metadata entry)
            with contextlib.suppress(zipfile.BadZipFile):
                with zipfile.ZipFile(w, 'r') as model:
                    meta_file = model.namelist()[0]
                    meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))
                    stride, names = int(meta['stride']), meta['names']
        elif tfjs:  # TF.js
            raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')
        elif paddle:  # PaddlePaddle
            LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
            check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
            import paddle.inference as pdi
            if not Path(w).is_file():  # if not *.pdmodel
                w = next(Path(w).rglob('*.pdmodel'))  # get *.pdmodel file from *_paddle_model dir
            weights = Path(w).with_suffix('.pdiparams')
            config = pdi.Config(str(w), str(weights))
            if cuda:
                config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
            predictor = pdi.create_predictor(config)
            input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
            output_names = predictor.get_output_names()
        elif triton:  # NVIDIA Triton Inference Server
            LOGGER.info(f'Using {w} as Triton Inference Server...')
            check_requirements('tritonclient[all]')
            from utils.triton import TritonRemoteModel
            model = TritonRemoteModel(url=w)
            nhwc = model.runtime.startswith('tensorflow')
        else:
            raise NotImplementedError(f'ERROR: {w} is not a supported format')

        # class names: fall back to data yaml or generic class{i} labels when
        # the backend branch above did not define `names`.
        if 'names' not in locals():
            names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}
        if names[0] == 'n01440764' and len(names) == 1000:  # ImageNet
            names = yaml_load(ROOT / 'data/ImageNet.yaml')['names']  # human-readable names

        self.__dict__.update(locals())  # assign all variables to self

    def forward(self, im, augment=False, visualize=False):
        # YOLOv5 MultiBackend inference: dispatch `im` (BCHW torch tensor) to
        # the backend loaded in __init__ and return torch tensor(s) on self.device.
        b, ch, h, w = im.shape  # batch, channel, height, width
        if self.fp16 and im.dtype != torch.float16:
            im = im.half()  # to FP16
        if self.nhwc:
            im = im.permute(0, 2, 3, 1)  # torch BCHW to numpy BHWC shape(1,320,192,3)

        if self.pt:  # PyTorch
            y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
        elif self.jit:  # TorchScript
            y = self.model(im)
        elif self.dnn:  # ONNX OpenCV DNN
            im = im.cpu().numpy()  # torch to numpy
            self.net.setInput(im)
            y = self.net.forward()
        elif self.onnx:  # ONNX Runtime
            im = im.cpu().numpy()  # torch to numpy
            y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
        elif self.xml:  # OpenVINO
            im = im.cpu().numpy()  # FP32
            y = list(self.executable_network([im]).values())
        elif self.engine:  # TensorRT
            if self.dynamic and im.shape != self.bindings['images'].shape:
                i = self.model.get_binding_index('images')
                self.context.set_binding_shape(i, im.shape)  # reshape if dynamic
                self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)
                for name in self.output_names:
                    i = self.model.get_binding_index(name)
                    self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
            s = self.bindings['images'].shape
            assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
            self.binding_addrs['images'] = int(im.data_ptr())
            self.context.execute_v2(list(self.binding_addrs.values()))
            y = [self.bindings[x].data for x in sorted(self.output_names)]
        elif self.coreml:  # CoreML
            im = im.cpu().numpy()
            im = Image.fromarray((im[0] * 255).astype('uint8'))
            # im = im.resize((192, 320), Image.ANTIALIAS)
            y = self.model.predict({'image': im})  # coordinates are xywh normalized
            if 'confidence' in y:
                box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
                # NOTE(review): np.float was removed in numpy>=1.24; this needs
                # float (or np.float32) to run on current numpy.
                conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)
                y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
            else:
                y = list(reversed(y.values()))  # reversed for segmentation models (pred, proto)
        elif self.paddle:  # PaddlePaddle
            im = im.cpu().numpy().astype(np.float32)
            self.input_handle.copy_from_cpu(im)
            self.predictor.run()
            y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
        elif self.triton:  # NVIDIA Triton Inference Server
            y = self.model(im)
        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
            im = im.cpu().numpy()
            if self.saved_model:  # SavedModel
                y = self.model(im, training=False) if self.keras else self.model(im)
            elif self.pb:  # GraphDef
                y = self.frozen_func(x=self.tf.constant(im))
            else:  # Lite or Edge TPU
                input = self.input_details[0]
                int8 = input['dtype'] == np.uint8  # is TFLite quantized uint8 model
                if int8:
                    scale, zero_point = input['quantization']
                    im = (im / scale + zero_point).astype(np.uint8)  # de-scale
                self.interpreter.set_tensor(input['index'], im)
                self.interpreter.invoke()
                y = []
                for output in self.output_details:
                    x = self.interpreter.get_tensor(output['index'])
                    if int8:
                        scale, zero_point = output['quantization']
                        x = (x.astype(np.float32) - zero_point) * scale  # re-scale
                    y.append(x)
                y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
                y[0][..., :4] *= [w, h, w, h]  # xywh normalized to pixels

        if isinstance(y, (list, tuple)):
            return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
        else:
            return self.from_numpy(y)

    def from_numpy(self, x):
        # Convert a numpy array to a torch tensor on self.device; pass tensors through.
        return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x

    def warmup(self, imgsz=(1, 3, 640, 640)):
        # Warmup model by running inference once
        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton
        if any(warmup_types) and (self.device.type != 'cpu' or self.triton):
            im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input
            for _ in range(2 if self.jit else 1):  #
                self.forward(im)  # warmup

    @staticmethod
    def _model_type(p='path/to/model.pt'):
        # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
        # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
        from export import export_formats
        from utils.downloads import is_url
        sf = list(export_formats().Suffix)  # export suffixes
        if not is_url(p, check=False):
            check_suffix(p, sf)  # checks
        url = urlparse(p)  # if url may be Triton inference server
        types = [s in Path(p).name for s in sf]
        types[8] &= not types[9]  # tflite &= not edgetpu
        triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])
        return types + [triton]

    @staticmethod
    def _load_metadata(f=Path('path/to/meta.yaml')):
        # Load metadata from meta.yaml if it exists
        if f.exists():
            d = yaml_load(f)
            return d['stride'], d['names']  # assign stride, names
        return None, None
+
+
class AutoShape(nn.Module):
    # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
    conf = 0.25  # NMS confidence threshold
    iou = 0.45  # NMS IoU threshold
    agnostic = False  # NMS class-agnostic
    multi_label = False  # NMS multiple labels per box
    classes = None  # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
    max_det = 1000  # maximum number of detections per image
    amp = False  # Automatic Mixed Precision (AMP) inference

    def __init__(self, model, verbose=True):
        """Wrap `model` (a DetectMultiBackend or plain PyTorch model) for
        shape-robust inference; copies key attributes onto the wrapper."""
        super().__init__()
        if verbose:
            LOGGER.info('Adding AutoShape... ')
        copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=())  # copy attributes
        self.dmb = isinstance(model, DetectMultiBackend)  # DetectMultiBackend() instance
        self.pt = not self.dmb or model.pt  # PyTorch model
        self.model = model.eval()
        if self.pt:
            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()
            m.inplace = False  # Detect.inplace=False for safe multithread inference
            m.export = True  # do not output loss values

    def _apply(self, fn):
        # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
        self = super()._apply(fn)
        if self.pt:
            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()
            m.stride = fn(m.stride)
            m.grid = list(map(fn, m.grid))
            if isinstance(m.anchor_grid, list):
                m.anchor_grid = list(map(fn, m.anchor_grid))
        return self

    @smart_inference_mode()
    def forward(self, ims, size=640, augment=False, profile=False):
        """Run pre-process -> inference -> NMS on flexible input and return a
        Detections object (or raw model output for torch.Tensor input)."""
        # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:
        #   file:       ims = 'data/images/zidane.jpg'  # str or PosixPath
        #   URI:             = 'https://ultralytics.com/images/zidane.jpg'
        #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
        #   PIL:             = Image.open('image.jpg') or ImageGrab.grab()  # HWC x(640,1280,3)
        #   numpy:           = np.zeros((640,1280,3))  # HWC
        #   torch:           = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)
        #   multiple:        = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images

        dt = (Profile(), Profile(), Profile())  # pre-process / inference / NMS timers
        with dt[0]:
            if isinstance(size, int):  # expand
                size = (size, size)
            p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device)  # param
            autocast = self.amp and (p.device.type != 'cpu')  # Automatic Mixed Precision (AMP) inference
            if isinstance(ims, torch.Tensor):  # torch
                # Tensor input bypasses pre/post-processing entirely.
                with amp.autocast(autocast):
                    return self.model(ims.to(p.device).type_as(p), augment=augment)  # inference

            # Pre-process
            n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims])  # number, list of images
            shape0, shape1, files = [], [], []  # image and inference shapes, filenames
            for i, im in enumerate(ims):
                f = f'image{i}'  # filename
                if isinstance(im, (str, Path)):  # filename or uri
                    im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
                    im = np.asarray(exif_transpose(im))
                elif isinstance(im, Image.Image):  # PIL Image
                    im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
                files.append(Path(f).with_suffix('.jpg').name)
                if im.shape[0] < 5:  # image in CHW
                    im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
                im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)  # enforce 3ch input
                s = im.shape[:2]  # HWC
                shape0.append(s)  # image shape
                g = max(size) / max(s)  # gain
                shape1.append([int(y * g) for y in s])
                ims[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
            shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)]  # inf shape
            x = [letterbox(im, shape1, auto=False)[0] for im in ims]  # pad
            x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW
            x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32

        with amp.autocast(autocast):
            # Inference
            with dt[1]:
                y = self.model(x, augment=augment)  # forward

            # Post-process
            with dt[2]:
                y = non_max_suppression(y if self.dmb else y[0],
                                        self.conf,
                                        self.iou,
                                        self.classes,
                                        self.agnostic,
                                        self.multi_label,
                                        max_det=self.max_det)  # NMS
                # Scale boxes back from inference shape to each original image shape.
                for i in range(n):
                    scale_boxes(shape1, y[i][:, :4], shape0[i])

            return Detections(ims, y, files, dt, self.names, x.shape)
+
+
class Detections:
    # YOLOv5 detections class for inference results
    def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
        """Store images, predictions and metadata; precompute pixel and
        normalized box representations in both xyxy and xywh form."""
        super().__init__()
        d = pred[0].device  # device
        gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims]  # normalizations
        self.ims = ims  # list of images as numpy arrays
        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
        self.names = names  # class names
        self.files = files  # image filenames
        self.times = times  # profiling times
        self.xyxy = pred  # xyxy pixels
        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
        self.n = len(self.pred)  # number of images (batch size)
        self.t = tuple(x.t / self.n * 1E3 for x in times)  # timestamps (ms)
        self.s = tuple(shape)  # inference BCHW shape

    def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):
        """Shared worker behind show/save/crop/render/print; the boolean flags
        select which side effect(s) to perform per image."""
        s, crops = '', []
        for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
            s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # string
            if pred.shape[0]:
                for c in pred[:, -1].unique():
                    n = (pred[:, -1] == c).sum()  # detections per class
                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
                s = s.rstrip(', ')
                if show or save or render or crop:
                    annotator = Annotator(im, example=str(self.names))
                    for *box, conf, cls in reversed(pred):  # xyxy, confidence, class
                        label = f'{self.names[int(cls)]} {conf:.2f}'
                        if crop:
                            file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
                            crops.append({
                                'box': box,
                                'conf': conf,
                                'cls': cls,
                                'label': label,
                                'im': save_one_box(box, im, file=file, save=save)})
                        else:  # all others
                            annotator.box_label(box, label if labels else '', color=colors(cls))
                    im = annotator.im
            else:
                s += '(no detections)'

            im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im  # from np
            if show:
                if is_jupyter():
                    from IPython.display import display
                    display(im)
                else:
                    im.show(self.files[i])
            if save:
                f = self.files[i]
                im.save(save_dir / f)  # save
                if i == self.n - 1:
                    LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
            if render:
                self.ims[i] = np.asarray(im)
        if pprint:
            s = s.lstrip('\n')
            return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t
        if crop:
            if save:
                LOGGER.info(f'Saved results to {save_dir}\n')
            return crops

    @TryExcept('Showing images is not supported in this environment')
    def show(self, labels=True):
        self._run(show=True, labels=labels)  # show results

    def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False):
        save_dir = increment_path(save_dir, exist_ok, mkdir=True)  # increment save_dir
        self._run(save=True, labels=labels, save_dir=save_dir)  # save results

    def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False):
        save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
        return self._run(crop=True, save=save, save_dir=save_dir)  # crop results

    def render(self, labels=True):
        self._run(render=True, labels=labels)  # render results
        return self.ims

    def pandas(self):
        # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
        new = copy(self)  # return copy
        ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name'  # xyxy columns
        cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name'  # xywh columns
        for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
            a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)]  # update
            setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
        return new

    def tolist(self):
        # return a list of Detections objects, i.e. 'for result in results.tolist():'
        r = range(self.n)  # iterable
        x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
        # for d in x:
        #    for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
        #        setattr(d, k, getattr(d, k)[0])  # pop out of list
        return x

    def print(self):
        LOGGER.info(self.__str__())

    def __len__(self):  # override len(results)
        return self.n

    def __str__(self):  # override print(results)
        return self._run(pprint=True)  # print results

    def __repr__(self):
        return f'YOLOv5 {self.__class__} instance\n' + self.__str__()
+
+
class Proto(nn.Module):
    """YOLOv5 mask prototype module for segmentation models:
    3x3 conv -> 2x nearest upsample -> 3x3 conv -> projection to c2 protos."""

    def __init__(self, c1, c_=256, c2=32):  # ch_in, number of protos, number of masks
        super().__init__()
        self.cv1 = Conv(c1, c_, k=3)
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.cv2 = Conv(c_, c_, k=3)
        self.cv3 = Conv(c_, c2)

    def forward(self, x):
        y = self.cv1(x)
        y = self.upsample(y)
        y = self.cv2(y)
        return self.cv3(y)
+
+
class Classify(nn.Module):
    """YOLOv5 classification head, i.e. x(b,c1,20,20) -> x(b,c2).

    Conv to 1280 channels (efficientnet_b0 width), global average pool,
    dropout, then a linear classifier.
    """

    def __init__(self,
                 c1,
                 c2,
                 k=1,
                 s=1,
                 p=None,
                 g=1,
                 dropout_p=0.0):  # ch_in, ch_out, kernel, stride, padding, groups, dropout probability
        super().__init__()
        c_ = 1280  # efficientnet_b0 size
        self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
        self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b,c_,1,1)
        self.drop = nn.Dropout(p=dropout_p, inplace=True)
        self.linear = nn.Linear(c_, c2)  # to x(b,c2)

    def forward(self, x):
        # Multi-scale inputs arrive as a list; merge them on the channel axis.
        if isinstance(x, list):
            x = torch.cat(x, 1)
        feats = self.pool(self.conv(x)).flatten(1)
        return self.linear(self.drop(feats))
diff --git a/models/experimental.py b/models/experimental.py
new file mode 100644
index 0000000000000000000000000000000000000000..02d35b9ebd11d3407d64ae436142aca6100c9084
--- /dev/null
+++ b/models/experimental.py
@@ -0,0 +1,111 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Experimental modules
+"""
+import math
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from utils.downloads import attempt_download
+
+
+class Sum(nn.Module):
+    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
+    def __init__(self, n, weight=False):  # n: number of inputs
+        super().__init__()
+        self.weight = weight  # apply weights boolean
+        self.iter = range(n - 1)  # iter object (indices of the n-1 inputs added onto x[0])
+        if weight:
+            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # learnable layer weights
+
+    def forward(self, x):
+        y = x[0]  # no weight
+        if self.weight:
+            w = torch.sigmoid(self.w) * 2  # sigmoid maps each learned weight into (0, 2)
+            for i in self.iter:
+                y = y + x[i + 1] * w[i]
+        else:
+            for i in self.iter:
+                y = y + x[i + 1]
+        return y
+
+
+class MixConv2d(nn.Module):
+    # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
+    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy
+        super().__init__()
+        n = len(k)  # number of convolutions
+        if equal_ch:  # equal c_ per group
+            i = torch.linspace(0, n - 1E-6, c2).floor()  # c2 indices
+            c_ = [(i == g).sum() for g in range(n)]  # intermediate channels
+        else:  # equal weight.numel() per group
+            b = [c2] + [0] * n
+            a = np.eye(n + 1, n, k=-1)
+            a -= np.roll(a, 1, axis=1)
+            a *= np.array(k) ** 2
+            a[0] = 1
+            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
+
+        # NOTE: the loop variables k/c_ deliberately shadow the tuple args; k // 2 gives 'same' padding for odd k
+        self.m = nn.ModuleList([
+            nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
+        self.bn = nn.BatchNorm2d(c2)
+        self.act = nn.SiLU()
+
+    def forward(self, x):
+        return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))  # per-kernel convs concatenated on channels
+
+
+class Ensemble(nn.ModuleList):
+    # Ensemble of models
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x, augment=False, profile=False, visualize=False):
+        y = [module(x, augment, profile, visualize)[0] for module in self]  # keep inference output [0] from each model
+        # y = torch.stack(y).max(0)[0]  # max ensemble
+        # y = torch.stack(y).mean(0)  # mean ensemble
+        y = torch.cat(y, 1)  # nms ensemble: concatenate candidate boxes, NMS downstream dedupes
+        return y, None  # inference, train output
+
+
+def attempt_load(weights, device=None, inplace=True, fuse=True):
+    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
+    from models.yolo import Detect, Model
+
+    model = Ensemble()
+    for w in weights if isinstance(weights, list) else [weights]:
+        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load (downloads weights file if missing)
+        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model; prefer the EMA weights if present
+
+        # Model compatibility updates for checkpoints saved by older YOLOv5 versions
+        if not hasattr(ckpt, 'stride'):
+            ckpt.stride = torch.tensor([32.])
+        if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
+            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict
+
+        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode
+
+    # Module compatibility updates
+    for m in model.modules():
+        t = type(m)
+        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
+            m.inplace = inplace  # torch 1.7.0 compatibility
+        if t is Detect and not isinstance(m.anchor_grid, list):
+            delattr(m, 'anchor_grid')
+            setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)  # reset so Detect re-derives grids at inference
+        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
+            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
+
+    # Return model directly when a single checkpoint was given
+    if len(model) == 1:
+        return model[-1]
+
+    # Return detection ensemble
+    print(f'Ensemble created with {weights}\n')
+    for k in 'names', 'nc', 'yaml':
+        setattr(model, k, getattr(model[0], k))  # expose first model's metadata on the ensemble
+    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
+    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
+    return model
diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e4d7beb06e07f295eaf58b1ebb2430a67997d2d4
--- /dev/null
+++ b/models/hub/anchors.yaml
@@ -0,0 +1,59 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# Default anchors for COCO data
+
+
+# P5 -------------------------------------------------------------------------------------------------------------------
+# P5-640:
+anchors_p5_640:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+
+# P6 -------------------------------------------------------------------------------------------------------------------
+# P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387
+anchors_p6_640:
+ - [9,11, 21,19, 17,41] # P3/8
+ - [43,32, 39,70, 86,64] # P4/16
+ - [65,131, 134,130, 120,265] # P5/32
+ - [282,180, 247,354, 512,387] # P6/64
+
+# P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792
+anchors_p6_1280:
+ - [19,27, 44,40, 38,94] # P3/8
+ - [96,68, 86,152, 180,137] # P4/16
+ - [140,301, 303,264, 238,542] # P5/32
+ - [436,615, 739,380, 925,792] # P6/64
+
+# P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187
+anchors_p6_1920:
+ - [28,41, 67,59, 57,141] # P3/8
+ - [144,103, 129,227, 270,205] # P4/16
+ - [209,452, 455,396, 358,812] # P5/32
+ - [653,922, 1109,570, 1387,1187] # P6/64
+
+
+# P7 -------------------------------------------------------------------------------------------------------------------
+# P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372
+anchors_p7_640:
+ - [11,11, 13,30, 29,20] # P3/8
+ - [30,46, 61,38, 39,92] # P4/16
+ - [78,80, 146,66, 79,163] # P5/32
+ - [149,150, 321,143, 157,303] # P6/64
+ - [257,402, 359,290, 524,372] # P7/128
+
+# P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818
+anchors_p7_1280:
+ - [19,22, 54,36, 32,77] # P3/8
+ - [70,83, 138,71, 75,173] # P4/16
+ - [165,159, 148,334, 375,151] # P5/32
+ - [334,317, 251,626, 499,474] # P6/64
+ - [750,326, 534,814, 1079,818] # P7/128
+
+# P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227
+anchors_p7_1920:
+ - [29,34, 81,55, 47,115] # P3/8
+ - [105,124, 207,107, 113,259] # P4/16
+ - [247,238, 222,500, 563,227] # P5/32
+ - [501,476, 376,939, 749,711] # P6/64
+ - [1126,489, 801,1222, 1618,1227] # P7/128
diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c66982158ce82d4e4ed7241c469b6f0166f0db49
--- /dev/null
+++ b/models/hub/yolov3-spp.yaml
@@ -0,0 +1,51 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# darknet53 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [32, 3, 1]], # 0
+ [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
+ [-1, 1, Bottleneck, [64]],
+ [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
+ [-1, 2, Bottleneck, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
+ [-1, 8, Bottleneck, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
+ [-1, 8, Bottleneck, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
+ [-1, 4, Bottleneck, [1024]], # 10
+ ]
+
+# YOLOv3-SPP head
+head:
+ [[-1, 1, Bottleneck, [1024, False]],
+ [-1, 1, SPP, [512, [5, 9, 13]]],
+ [-1, 1, Conv, [1024, 3, 1]],
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
+
+ [-2, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P4
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
+
+ [-2, 1, Conv, [128, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P3
+ [-1, 1, Bottleneck, [256, False]],
+ [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
+
+ [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b28b443152485e39dcf690d18c403780c898bfab
--- /dev/null
+++ b/models/hub/yolov3-tiny.yaml
@@ -0,0 +1,41 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+anchors:
+ - [10,14, 23,27, 37,58] # P4/16
+ - [81,82, 135,169, 344,319] # P5/32
+
+# YOLOv3-tiny backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [16, 3, 1]], # 0
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
+ [-1, 1, Conv, [32, 3, 1]],
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
+ [-1, 1, Conv, [64, 3, 1]],
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
+ [-1, 1, Conv, [128, 3, 1]],
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
+ [-1, 1, Conv, [256, 3, 1]],
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
+ [-1, 1, Conv, [512, 3, 1]],
+ [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
+ [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
+ ]
+
+# YOLOv3-tiny head
+head:
+ [[-1, 1, Conv, [1024, 3, 1]],
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)
+
+ [-2, 1, Conv, [128, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P4
+ [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)
+
+ [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5)
+ ]
diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d1ef91290a8d261ccaf3a9663802e78b6b4e7542
--- /dev/null
+++ b/models/hub/yolov3.yaml
@@ -0,0 +1,51 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# darknet53 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [32, 3, 1]], # 0
+ [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
+ [-1, 1, Bottleneck, [64]],
+ [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
+ [-1, 2, Bottleneck, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
+ [-1, 8, Bottleneck, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
+ [-1, 8, Bottleneck, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
+ [-1, 4, Bottleneck, [1024]], # 10
+ ]
+
+# YOLOv3 head
+head:
+ [[-1, 1, Bottleneck, [1024, False]],
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, Conv, [1024, 3, 1]],
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
+
+ [-2, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P4
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
+
+ [-2, 1, Conv, [128, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P3
+ [-1, 1, Bottleneck, [256, False]],
+ [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
+
+ [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..504815f5cfa03329618c4a1801f16ce68ec666e0
--- /dev/null
+++ b/models/hub/yolov5-bifpn.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 BiFPN head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a23e9c6fbf9f7f00c9e7f2a24bc8513a9d5717ea
--- /dev/null
+++ b/models/hub/yolov5-fpn.yaml
@@ -0,0 +1,42 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 FPN head
+head:
+ [[-1, 3, C3, [1024, False]], # 10 (P5/32-large)
+
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 3, C3, [512, False]], # 14 (P4/16-medium)
+
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 3, C3, [256, False]], # 18 (P3/8-small)
+
+ [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..554117dda59aca4a016b2ff42851d39cdc34f714
--- /dev/null
+++ b/models/hub/yolov5-p2.yaml
@@ -0,0 +1,54 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [128, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 2], 1, Concat, [1]], # cat backbone P2
+ [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall)
+
+ [-1, 1, Conv, [128, 3, 2]],
+ [[-1, 18], 1, Concat, [1]], # cat head P3
+ [-1, 3, C3, [256, False]], # 24 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 27 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 30 (P5/32-large)
+
+ [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5)
+ ]
diff --git a/models/hub/yolov5-p34.yaml b/models/hub/yolov5-p34.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dbf0f850083ebf546ae7fc367be029297c174da1
--- /dev/null
+++ b/models/hub/yolov5-p34.yaml
@@ -0,0 +1,41 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.50 # layer channel multiple
+anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 6, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 1024 ] ],
+ [ -1, 1, SPPF, [ 1024, 5 ] ], # 9
+ ]
+
+# YOLOv5 v6.0 head with (P3, P4) outputs
+head:
+ [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 13
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium)
+
+ [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4)
+ ]
diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a17202f22044c0546bd9373ea58bd21c06b1d334
--- /dev/null
+++ b/models/hub/yolov5-p6.yaml
@@ -0,0 +1,56 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [768]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 11
+ ]
+
+# YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs
+head:
+ [[-1, 1, Conv, [768, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P5
+ [-1, 3, C3, [768, False]], # 15
+
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 19
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 23 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 20], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 16], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [768, False]], # 29 (P5/32-large)
+
+ [-1, 1, Conv, [768, 3, 2]],
+ [[-1, 12], 1, Concat, [1]], # cat head P6
+ [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
+
+ [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
+ ]
diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..edd7d13a34a6c40e94d900ecce8ca64ae11bf5a1
--- /dev/null
+++ b/models/hub/yolov5-p7.yaml
@@ -0,0 +1,67 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [768]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
+ [-1, 3, C3, [1024]],
+ [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128
+ [-1, 3, C3, [1280]],
+ [-1, 1, SPPF, [1280, 5]], # 13
+ ]
+
+# YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs
+head:
+ [[-1, 1, Conv, [1024, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 10], 1, Concat, [1]], # cat backbone P6
+ [-1, 3, C3, [1024, False]], # 17
+
+ [-1, 1, Conv, [768, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P5
+ [-1, 3, C3, [768, False]], # 21
+
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 25
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 29 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 26], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 32 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 22], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [768, False]], # 35 (P5/32-large)
+
+ [-1, 1, Conv, [768, 3, 2]],
+ [[-1, 18], 1, Concat, [1]], # cat head P6
+ [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge)
+
+ [-1, 1, Conv, [1024, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P7
+ [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge)
+
+ [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7)
+ ]
diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ccfbf900691c5738b4705d2ce7944171b6152c98
--- /dev/null
+++ b/models/hub/yolov5-panet.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 PANet head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..632c2cb699e3cf261da462ec7dd20c0ffb7aaad3
--- /dev/null
+++ b/models/hub/yolov5l6.yaml
@@ -0,0 +1,60 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+anchors:
+ - [19,27, 44,40, 38,94] # P3/8
+ - [96,68, 86,152, 180,137] # P4/16
+ - [140,301, 303,264, 238,542] # P5/32
+ - [436,615, 739,380, 925,792] # P6/64
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [768]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 11
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [768, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P5
+ [-1, 3, C3, [768, False]], # 15
+
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 19
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 23 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 20], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 16], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [768, False]], # 29 (P5/32-large)
+
+ [-1, 1, Conv, [768, 3, 2]],
+ [[-1, 12], 1, Concat, [1]], # cat head P6
+ [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
+
+ [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
+ ]
diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ecc53fd68ba6421b4fe63d6693b6563ecaa0e981
--- /dev/null
+++ b/models/hub/yolov5m6.yaml
@@ -0,0 +1,60 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 0.67 # model depth multiple
+width_multiple: 0.75 # layer channel multiple
+anchors:
+ - [19,27, 44,40, 38,94] # P3/8
+ - [96,68, 86,152, 180,137] # P4/16
+ - [140,301, 303,264, 238,542] # P5/32
+ - [436,615, 739,380, 925,792] # P6/64
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [768]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 11
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [768, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P5
+ [-1, 3, C3, [768, False]], # 15
+
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 19
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 23 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 20], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 16], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [768, False]], # 29 (P5/32-large)
+
+ [-1, 1, Conv, [768, 3, 2]],
+ [[-1, 12], 1, Concat, [1]], # cat head P6
+ [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
+
+ [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
+ ]
diff --git a/models/hub/yolov5n6.yaml b/models/hub/yolov5n6.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0c0c71d32551789d57e5f44fd936636ecb4e3414
--- /dev/null
+++ b/models/hub/yolov5n6.yaml
@@ -0,0 +1,60 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.25 # layer channel multiple
+anchors:
+ - [19,27, 44,40, 38,94] # P3/8
+ - [96,68, 86,152, 180,137] # P4/16
+ - [140,301, 303,264, 238,542] # P5/32
+ - [436,615, 739,380, 925,792] # P6/64
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [768]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 11
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [768, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P5
+ [-1, 3, C3, [768, False]], # 15
+
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 19
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 23 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 20], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 16], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [768, False]], # 29 (P5/32-large)
+
+ [-1, 1, Conv, [768, 3, 2]],
+ [[-1, 12], 1, Concat, [1]], # cat head P6
+ [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
+
+ [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
+ ]
diff --git a/models/hub/yolov5s-LeakyReLU.yaml b/models/hub/yolov5s-LeakyReLU.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3a179bf3311c6f46add3f6fc1b9fcde9ba7ffed7
--- /dev/null
+++ b/models/hub/yolov5s-LeakyReLU.yaml
@@ -0,0 +1,49 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+activation: nn.LeakyReLU(0.1) # <----- Conv() activation used throughout entire YOLOv5 model
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.50 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ff9519c3f1aa354f512ddab8b23e861d0f3de6c6
--- /dev/null
+++ b/models/hub/yolov5s-ghost.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.50 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3Ghost, [128]],
+ [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3Ghost, [256]],
+ [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3Ghost, [512]],
+ [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3Ghost, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, GhostConv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3Ghost, [512, False]], # 13
+
+ [-1, 1, GhostConv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, GhostConv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, GhostConv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..100d7c447527f1116e0edb3e1c096904fe3302f1
--- /dev/null
+++ b/models/hub/yolov5s-transformer.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.50 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3TR, [1024]], # 8 <--- C3TR() Transformer module
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a28fb559482b25a41531517a68f08253f08edb0f
--- /dev/null
+++ b/models/hub/yolov5s6.yaml
@@ -0,0 +1,60 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.50 # layer channel multiple
+anchors:
+ - [19,27, 44,40, 38,94] # P3/8
+ - [96,68, 86,152, 180,137] # P4/16
+ - [140,301, 303,264, 238,542] # P5/32
+ - [436,615, 739,380, 925,792] # P6/64
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [768]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 11
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [768, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P5
+ [-1, 3, C3, [768, False]], # 15
+
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 19
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 23 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 20], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 16], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [768, False]], # 29 (P5/32-large)
+
+ [-1, 1, Conv, [768, 3, 2]],
+ [[-1, 12], 1, Concat, [1]], # cat head P6
+ [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
+
+ [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
+ ]
diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ba795c4aad319b94db0fb4fd6961e9ef0cac207a
--- /dev/null
+++ b/models/hub/yolov5x6.yaml
@@ -0,0 +1,60 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.33 # model depth multiple
+width_multiple: 1.25 # layer channel multiple
+anchors:
+ - [19,27, 44,40, 38,94] # P3/8
+ - [96,68, 86,152, 180,137] # P4/16
+ - [140,301, 303,264, 238,542] # P5/32
+ - [436,615, 739,380, 925,792] # P6/64
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [768]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 11
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [768, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P5
+ [-1, 3, C3, [768, False]], # 15
+
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 19
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 23 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 20], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 16], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [768, False]], # 29 (P5/32-large)
+
+ [-1, 1, Conv, [768, 3, 2]],
+ [[-1, 12], 1, Concat, [1]], # cat head P6
+ [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
+
+ [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
+ ]
diff --git a/models/segment/yolov5l-seg.yaml b/models/segment/yolov5l-seg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4782de11dd2d3940b9a6e977c89ad0da4e5fe05f
--- /dev/null
+++ b/models/segment/yolov5l-seg.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
+ ]
diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..07ec25ba264db4542aa9baa3e22e077260d6f6e8
--- /dev/null
+++ b/models/segment/yolov5m-seg.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 0.67 # model depth multiple
+width_multiple: 0.75 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
+ ]
diff --git a/models/segment/yolov5n-seg.yaml b/models/segment/yolov5n-seg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c28225ab4a506d2f34b0074ee7f19d6770708934
--- /dev/null
+++ b/models/segment/yolov5n-seg.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.25 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
+ ]
diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a827814e1399865e5cdd48435131315cce23f2e1
--- /dev/null
+++ b/models/segment/yolov5s-seg.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.5 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
+ ]
diff --git a/models/segment/yolov5x-seg.yaml b/models/segment/yolov5x-seg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5d0c4524a99c62bc3eef237338439ea6e1d0b0cf
--- /dev/null
+++ b/models/segment/yolov5x-seg.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.33 # model depth multiple
+width_multiple: 1.25 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
+ ]
diff --git a/models/tf.py b/models/tf.py
new file mode 100644
index 0000000000000000000000000000000000000000..8290cf2e57f5a520aab67ae50f003e8dcc7ca7e6
--- /dev/null
+++ b/models/tf.py
@@ -0,0 +1,608 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+TensorFlow, Keras and TFLite versions of YOLOv5
+Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127
+
+Usage:
+ $ python models/tf.py --weights yolov5s.pt
+
+Export:
+ $ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs
+"""
+
+import argparse
+import sys
+from copy import deepcopy
+from pathlib import Path
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1] # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+ sys.path.append(str(ROOT)) # add ROOT to PATH
+# ROOT = ROOT.relative_to(Path.cwd()) # relative
+
+import numpy as np
+import tensorflow as tf
+import torch
+import torch.nn as nn
+from tensorflow import keras
+
+from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv,
+ DWConvTranspose2d, Focus, autopad)
+from models.experimental import MixConv2d, attempt_load
+from models.yolo import Detect, Segment
+from utils.activations import SiLU
+from utils.general import LOGGER, make_divisible, print_args
+
+
+class TFBN(keras.layers.Layer):
+ # TensorFlow BatchNormalization wrapper
+ def __init__(self, w=None):
+ super().__init__()
+ self.bn = keras.layers.BatchNormalization(
+ beta_initializer=keras.initializers.Constant(w.bias.numpy()),
+ gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
+ moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),
+ moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),
+ epsilon=w.eps)
+
+ def call(self, inputs):
+ return self.bn(inputs)
+
+
+class TFPad(keras.layers.Layer):
+ # Pad inputs in spatial dimensions 1 and 2
+ def __init__(self, pad):
+ super().__init__()
+ if isinstance(pad, int):
+ self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
+ else: # tuple/list
+ self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]])
+
+ def call(self, inputs):
+ return tf.pad(inputs, self.pad, mode='constant', constant_values=0)
+
+
+class TFConv(keras.layers.Layer):
+ # Standard convolution
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
+ # ch_in, ch_out, weights, kernel, stride, padding, groups
+ super().__init__()
+ assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
+ # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
+ # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch
+ conv = keras.layers.Conv2D(
+ filters=c2,
+ kernel_size=k,
+ strides=s,
+ padding='SAME' if s == 1 else 'VALID',
+ use_bias=not hasattr(w, 'bn'),
+ kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
+ bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
+ self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
+ self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
+ self.act = activations(w.act) if act else tf.identity
+
+ def call(self, inputs):
+ return self.act(self.bn(self.conv(inputs)))
+
+
+class TFDWConv(keras.layers.Layer):
+ # Depthwise convolution
+ def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
+ # ch_in, ch_out, weights, kernel, stride, padding, groups
+ super().__init__()
+ assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels'
+ conv = keras.layers.DepthwiseConv2D(
+ kernel_size=k,
+ depth_multiplier=c2 // c1,
+ strides=s,
+ padding='SAME' if s == 1 else 'VALID',
+ use_bias=not hasattr(w, 'bn'),
+ depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
+ bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
+ self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
+ self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
+ self.act = activations(w.act) if act else tf.identity
+
+ def call(self, inputs):
+ return self.act(self.bn(self.conv(inputs)))
+
+
+class TFDWConvTranspose2d(keras.layers.Layer):
+ # Depthwise ConvTranspose2d
+ def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
+ # ch_in, ch_out, weights, kernel, stride, padding, groups
+ super().__init__()
+ assert c1 == c2, f'TFDWConv() output={c2} must be equal to input={c1} channels'
+ assert k == 4 and p1 == 1, 'TFDWConv() only valid for k=4 and p1=1'
+ weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy()
+ self.c1 = c1
+ self.conv = [
+ keras.layers.Conv2DTranspose(filters=1,
+ kernel_size=k,
+ strides=s,
+ padding='VALID',
+ output_padding=p2,
+ use_bias=True,
+ kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]),
+ bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)]
+
+ def call(self, inputs):
+ return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1]
+
+
+class TFFocus(keras.layers.Layer):
+ # Focus wh information into c-space
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
+ # ch_in, ch_out, kernel, stride, padding, groups
+ super().__init__()
+ self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)
+
+ def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c)
+ # inputs = inputs / 255 # normalize 0-255 to 0-1
+ inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]]
+ return self.conv(tf.concat(inputs, 3))
+
+
+class TFBottleneck(keras.layers.Layer):
+ # Standard bottleneck
+ def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion
+ super().__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+ self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
+ self.add = shortcut and c1 == c2
+
+ def call(self, inputs):
+ return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
+
+
+class TFCrossConv(keras.layers.Layer):
+ # Cross Convolution
+ def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
+ super().__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1)
+ self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2)
+ self.add = shortcut and c1 == c2
+
+ def call(self, inputs):
+ return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
+
+
+class TFConv2d(keras.layers.Layer):
+ # Substitution for PyTorch nn.Conv2D
+ def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
+ super().__init__()
+ assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
+ self.conv = keras.layers.Conv2D(filters=c2,
+ kernel_size=k,
+ strides=s,
+ padding='VALID',
+ use_bias=bias,
+ kernel_initializer=keras.initializers.Constant(
+ w.weight.permute(2, 3, 1, 0).numpy()),
+ bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None)
+
+ def call(self, inputs):
+ return self.conv(inputs)
+
+
+class TFBottleneckCSP(keras.layers.Layer):
+ # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
+ # ch_in, ch_out, number, shortcut, groups, expansion
+ super().__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+ self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
+ self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3)
+ self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4)
+ self.bn = TFBN(w.bn)
+ self.act = lambda x: keras.activations.swish(x)
+ self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
+
+ def call(self, inputs):
+ y1 = self.cv3(self.m(self.cv1(inputs)))
+ y2 = self.cv2(inputs)
+ return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))
+
+
+class TFC3(keras.layers.Layer):
+ # CSP Bottleneck with 3 convolutions
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
+ # ch_in, ch_out, number, shortcut, groups, expansion
+ super().__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+ self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
+ self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
+ self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
+
+ def call(self, inputs):
+ return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
+
+
+class TFC3x(keras.layers.Layer):
+ # 3 module with cross-convolutions
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
+ # ch_in, ch_out, number, shortcut, groups, expansion
+ super().__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+ self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
+ self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
+ self.m = keras.Sequential([
+ TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)])
+
+ def call(self, inputs):
+ return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
+
+
+class TFSPP(keras.layers.Layer):
+ # Spatial pyramid pooling layer used in YOLOv3-SPP
+ def __init__(self, c1, c2, k=(5, 9, 13), w=None):
+ super().__init__()
+ c_ = c1 // 2 # hidden channels
+ self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+ self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
+ self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k]
+
+ def call(self, inputs):
+ x = self.cv1(inputs)
+ return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))
+
+
+class TFSPPF(keras.layers.Layer):
+ # Spatial pyramid pooling-Fast layer
+ def __init__(self, c1, c2, k=5, w=None):
+ super().__init__()
+ c_ = c1 // 2 # hidden channels
+ self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+ self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
+ self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME')
+
+ def call(self, inputs):
+ x = self.cv1(inputs)
+ y1 = self.m(x)
+ y2 = self.m(y1)
+ return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))
+
+
+class TFDetect(keras.layers.Layer):
+ # TF YOLOv5 Detect layer
+ def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer
+ super().__init__()
+ self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
+ self.nc = nc # number of classes
+ self.no = nc + 5 # number of outputs per anchor
+ self.nl = len(anchors) # number of detection layers
+ self.na = len(anchors[0]) // 2 # number of anchors
+ self.grid = [tf.zeros(1)] * self.nl # init grid
+ self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32)
+ self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2])
+ self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]
+ self.training = False # set to False after building model
+ self.imgsz = imgsz
+ for i in range(self.nl):
+ ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
+ self.grid[i] = self._make_grid(nx, ny)
+
+ def call(self, inputs):
+ z = [] # inference output
+ x = []
+ for i in range(self.nl):
+ x.append(self.m[i](inputs[i]))
+ # x(bs,20,20,255) to x(bs,3,20,20,85)
+ ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
+ x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no])
+
+ if not self.training: # inference
+ y = x[i]
+ grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5
+ anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4
+ xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i] # xy
+ wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid
+ # Normalize xywh to 0-1 to reduce calibration error
+ xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
+ wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
+ y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1)
+ z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))
+
+ return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),)
+
+ @staticmethod
+ def _make_grid(nx=20, ny=20):
+ # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
+ # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
+ xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))
+ return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)
+
+
+class TFSegment(TFDetect):
+ # YOLOv5 Segment head for segmentation models
+ def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None):
+ super().__init__(nc, anchors, ch, imgsz, w)
+ self.nm = nm # number of masks
+ self.npr = npr # number of protos
+ self.no = 5 + nc + self.nm # number of outputs per anchor
+ self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] # output conv
+ self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto) # protos
+ self.detect = TFDetect.call
+
+ def call(self, x):
+ p = self.proto(x[0])
+ # p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0])) # (optional) full-size protos
+ p = tf.transpose(p, [0, 3, 1, 2]) # from shape(1,160,160,32) to shape(1,32,160,160)
+ x = self.detect(self, x)
+ return (x, p) if self.training else (x[0], p)
+
+
+class TFProto(keras.layers.Layer):
+
+ def __init__(self, c1, c_=256, c2=32, w=None):
+ super().__init__()
+ self.cv1 = TFConv(c1, c_, k=3, w=w.cv1)
+ self.upsample = TFUpsample(None, scale_factor=2, mode='nearest')
+ self.cv2 = TFConv(c_, c_, k=3, w=w.cv2)
+ self.cv3 = TFConv(c_, c2, w=w.cv3)
+
+ def call(self, inputs):
+ return self.cv3(self.cv2(self.upsample(self.cv1(inputs))))
+
+
+class TFUpsample(keras.layers.Layer):
+ # TF version of torch.nn.Upsample()
+ def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w'
+ super().__init__()
+ assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2'
+ self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode)
+ # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
+ # with default arguments: align_corners=False, half_pixel_centers=False
+ # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,
+ # size=(x.shape[1] * 2, x.shape[2] * 2))
+
+ def call(self, inputs):
+ return self.upsample(inputs)
+
+
+class TFConcat(keras.layers.Layer):
+ # TF version of torch.concat()
+ def __init__(self, dimension=1, w=None):
+ super().__init__()
+ assert dimension == 1, 'convert only NCHW to NHWC concat'
+ self.d = 3
+
+ def call(self, inputs):
+ return tf.concat(inputs, self.d)
+
+
+def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
+ LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
+ anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
+ na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
+ no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
+
+ layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
+ for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
+ m_str = m
+ m = eval(m) if isinstance(m, str) else m # eval strings
+ for j, a in enumerate(args):
+ try:
+ args[j] = eval(a) if isinstance(a, str) else a # eval strings
+ except NameError:
+ pass
+
+ n = max(round(n * gd), 1) if n > 1 else n # depth gain
+ if m in [
+ nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv,
+ BottleneckCSP, C3, C3x]:
+ c1, c2 = ch[f], args[0]
+ c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
+
+ args = [c1, c2, *args[1:]]
+ if m in [BottleneckCSP, C3, C3x]:
+ args.insert(2, n)
+ n = 1
+ elif m is nn.BatchNorm2d:
+ args = [ch[f]]
+ elif m is Concat:
+ c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
+ elif m in [Detect, Segment]:
+ args.append([ch[x + 1] for x in f])
+ if isinstance(args[1], int): # number of anchors
+ args[1] = [list(range(args[1] * 2))] * len(f)
+ if m is Segment:
+ args[3] = make_divisible(args[3] * gw, 8)
+ args.append(imgsz)
+ else:
+ c2 = ch[f]
+
+ tf_m = eval('TF' + m_str.replace('nn.', ''))
+ m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
+ else tf_m(*args, w=model.model[i]) # module
+
+ torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
+ t = str(m)[8:-2].replace('__main__.', '') # module type
+ np = sum(x.numel() for x in torch_m_.parameters()) # number params
+ m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
+ LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print
+ save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
+ layers.append(m_)
+ ch.append(c2)
+ return keras.Sequential(layers), sorted(save)
+
+
+class TFModel:
+ # TF YOLOv5 model
+ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes
+ super().__init__()
+ if isinstance(cfg, dict):
+ self.yaml = cfg # model dict
+ else: # is *.yaml
+ import yaml # for torch hub
+ self.yaml_file = Path(cfg).name
+ with open(cfg) as f:
+ self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
+
+ # Define model
+ if nc and nc != self.yaml['nc']:
+ LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
+ self.yaml['nc'] = nc # override yaml value
+ self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)
+
+ def predict(self,
+ inputs,
+ tf_nms=False,
+ agnostic_nms=False,
+ topk_per_class=100,
+ topk_all=100,
+ iou_thres=0.45,
+ conf_thres=0.25):
+ y = [] # outputs
+ x = inputs
+ for m in self.model.layers:
+ if m.f != -1: # if not from previous layer
+ x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
+
+ x = m(x) # run
+ y.append(x if m.i in self.savelist else None) # save output
+
+ # Add TensorFlow NMS
+ if tf_nms:
+ boxes = self._xywh2xyxy(x[0][..., :4])
+ probs = x[0][:, :, 4:5]
+ classes = x[0][:, :, 5:]
+ scores = probs * classes
+ if agnostic_nms:
+ nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres)
+ else:
+ boxes = tf.expand_dims(boxes, 2)
+ nms = tf.image.combined_non_max_suppression(boxes,
+ scores,
+ topk_per_class,
+ topk_all,
+ iou_thres,
+ conf_thres,
+ clip_boxes=False)
+ return (nms,)
+ return x # output [1,6300,85] = [xywh, conf, class0, class1, ...]
+ # x = x[0] # [x(1,6300,85), ...] to x(6300,85)
+ # xywh = x[..., :4] # x(6300,4) boxes
+ # conf = x[..., 4:5] # x(6300,1) confidences
+ # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes
+ # return tf.concat([conf, cls, xywh], 1)
+
+ @staticmethod
+ def _xywh2xyxy(xywh):
+ # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+ x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)
+ return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)
+
+
+class AgnosticNMS(keras.layers.Layer):
+ # TF Agnostic NMS
+ def call(self, input, topk_all, iou_thres, conf_thres):
+ # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450
+ return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres),
+ input,
+ fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),
+ name='agnostic_nms')
+
+ @staticmethod
+ def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS
+ boxes, classes, scores = x
+ class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)
+ scores_inp = tf.reduce_max(scores, -1)
+ selected_inds = tf.image.non_max_suppression(boxes,
+ scores_inp,
+ max_output_size=topk_all,
+ iou_threshold=iou_thres,
+ score_threshold=conf_thres)
+ selected_boxes = tf.gather(boxes, selected_inds)
+ padded_boxes = tf.pad(selected_boxes,
+ paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]],
+ mode='CONSTANT',
+ constant_values=0.0)
+ selected_scores = tf.gather(scores_inp, selected_inds)
+ padded_scores = tf.pad(selected_scores,
+ paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
+ mode='CONSTANT',
+ constant_values=-1.0)
+ selected_classes = tf.gather(class_inds, selected_inds)
+ padded_classes = tf.pad(selected_classes,
+ paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
+ mode='CONSTANT',
+ constant_values=-1.0)
+ valid_detections = tf.shape(selected_inds)[0]
+ return padded_boxes, padded_scores, padded_classes, valid_detections
+
+
+def activations(act=nn.SiLU):
+ # Returns TF activation from input PyTorch activation
+ if isinstance(act, nn.LeakyReLU):
+ return lambda x: keras.activations.relu(x, alpha=0.1)
+ elif isinstance(act, nn.Hardswish):
+ return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667
+ elif isinstance(act, (nn.SiLU, SiLU)):
+ return lambda x: keras.activations.swish(x)
+ else:
+ raise Exception(f'no matching TensorFlow activation found for PyTorch activation {act}')
+
+
+def representative_dataset_gen(dataset, ncalib=100):
+ # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays
+ for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):
+ im = np.transpose(img, [1, 2, 0])
+ im = np.expand_dims(im, axis=0).astype(np.float32)
+ im /= 255
+ yield [im]
+ if n >= ncalib:
+ break
+
+
+def run(
+ weights=ROOT / 'yolov5s.pt', # weights path
+ imgsz=(640, 640), # inference size h,w
+ batch_size=1, # batch size
+ dynamic=False, # dynamic batch size
+):
+ # PyTorch model
+ im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image
+ model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False)
+ _ = model(im) # inference
+ model.info()
+
+ # TensorFlow model
+ im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image
+ tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
+ _ = tf_model.predict(im) # inference
+
+ # Keras model
+ im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
+ keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im))
+ keras_model.summary()
+
+ LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.')
+
+
+def parse_opt():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
+ parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
+ parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+ parser.add_argument('--dynamic', action='store_true', help='dynamic batch size')
+ opt = parser.parse_args()
+ opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
+ print_args(vars(opt))
+ return opt
+
+
+def main(opt):
+ run(**vars(opt))
+
+
+if __name__ == '__main__':
+ opt = parse_opt()
+ main(opt)
diff --git a/models/yolo.py b/models/yolo.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed21c067ee9337bf534bfc908574362a61ad3207
--- /dev/null
+++ b/models/yolo.py
@@ -0,0 +1,391 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+YOLO-specific modules
+
+Usage:
+ $ python models/yolo.py --cfg yolov5s.yaml
+"""
+
+import argparse
+import contextlib
+import os
+import platform
+import sys
+from copy import deepcopy
+from pathlib import Path
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1] # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+ sys.path.append(str(ROOT)) # add ROOT to PATH
+if platform.system() != 'Windows':
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
+
+from models.common import *
+from models.experimental import *
+from utils.autoanchor import check_anchor_order
+from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
+from utils.plots import feature_visualization
+from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
+ time_sync)
+
+try:
+ import thop # for FLOPs computation
+except ImportError:
+ thop = None
+
+
+class Detect(nn.Module):
+ # YOLOv5 Detect head for detection models
+ stride = None # strides computed during build
+ dynamic = False # force grid reconstruction
+ export = False # export mode
+
+ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer
+ super().__init__()
+ self.nc = nc # number of classes
+ self.no = nc + 5 # number of outputs per anchor
+ self.nl = len(anchors) # number of detection layers
+ self.na = len(anchors[0]) // 2 # number of anchors
+ self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid
+ self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid
+ self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2)
+ self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
+ self.inplace = inplace # use inplace ops (e.g. slice assignment)
+
+ def forward(self, x):
+ z = [] # inference output
+ for i in range(self.nl):
+ x[i] = self.m[i](x[i]) # conv
+ bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
+ x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
+
+ if not self.training: # inference
+ if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
+ self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
+
+ if isinstance(self, Segment): # (boxes + masks)
+ xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
+ xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy
+ wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh
+ y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
+ else: # Detect (boxes only)
+ xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
+ xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy
+ wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh
+ y = torch.cat((xy, wh, conf), 4)
+ z.append(y.view(bs, self.na * nx * ny, self.no))
+
+ return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
+
+ def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
+ d = self.anchors[i].device
+ t = self.anchors[i].dtype
+ shape = 1, self.na, ny, nx, 2 # grid shape
+ y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
+ yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility
+ grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5
+ anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
+ return grid, anchor_grid
+
+
+class Segment(Detect):
+ # YOLOv5 Segment head for segmentation models
+ def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
+ super().__init__(nc, anchors, ch, inplace)
+ self.nm = nm # number of masks
+ self.npr = npr # number of protos
+ self.no = 5 + nc + self.nm # number of outputs per anchor
+ self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
+ self.proto = Proto(ch[0], self.npr, self.nm) # protos
+ self.detect = Detect.forward
+
+ def forward(self, x):
+ p = self.proto(x[0])
+ x = self.detect(self, x)
+ return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])
+
+
+class BaseModel(nn.Module):
+ # YOLOv5 base model
+ def forward(self, x, profile=False, visualize=False):
+ return self._forward_once(x, profile, visualize) # single-scale inference, train
+
+ def _forward_once(self, x, profile=False, visualize=False):
+ y, dt = [], [] # outputs
+ for m in self.model:
+ if m.f != -1: # if not from previous layer
+ x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
+ if profile:
+ self._profile_one_layer(m, x, dt)
+ x = m(x) # run
+ y.append(x if m.i in self.save else None) # save output
+ if visualize:
+ feature_visualization(x, m.type, m.i, save_dir=visualize)
+ return x
+
+ def _profile_one_layer(self, m, x, dt):
+ c = m == self.model[-1] # is final layer, copy input as inplace fix
+ o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs
+ t = time_sync()
+ for _ in range(10):
+ m(x.copy() if c else x)
+ dt.append((time_sync() - t) * 100)
+ if m == self.model[0]:
+ LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module")
+ LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
+ if c:
+ LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")
+
+ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
+ LOGGER.info('Fusing layers... ')
+ for m in self.model.modules():
+ if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
+ m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
+ delattr(m, 'bn') # remove batchnorm
+ m.forward = m.forward_fuse # update forward
+ self.info()
+ return self
+
+ def info(self, verbose=False, img_size=640): # print model information
+ model_info(self, verbose, img_size)
+
+ def _apply(self, fn):
+ # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
+ self = super()._apply(fn)
+ m = self.model[-1] # Detect()
+ if isinstance(m, (Detect, Segment)):
+ m.stride = fn(m.stride)
+ m.grid = list(map(fn, m.grid))
+ if isinstance(m.anchor_grid, list):
+ m.anchor_grid = list(map(fn, m.anchor_grid))
+ return self
+
+
+class DetectionModel(BaseModel):
+ # YOLOv5 detection model
+ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
+ super().__init__()
+ if isinstance(cfg, dict):
+ self.yaml = cfg # model dict
+ else: # is *.yaml
+ import yaml # for torch hub
+ self.yaml_file = Path(cfg).name
+ with open(cfg, encoding='ascii', errors='ignore') as f:
+ self.yaml = yaml.safe_load(f) # model dict
+
+ # Define model
+ ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
+ if nc and nc != self.yaml['nc']:
+ LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
+ self.yaml['nc'] = nc # override yaml value
+ if anchors:
+ LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
+ self.yaml['anchors'] = round(anchors) # override yaml value
+ self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
+ self.names = [str(i) for i in range(self.yaml['nc'])] # default names
+ self.inplace = self.yaml.get('inplace', True)
+
+ # Build strides, anchors
+ m = self.model[-1] # Detect()
+ if isinstance(m, (Detect, Segment)):
+ s = 256 # 2x min stride
+ m.inplace = self.inplace
+ forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
+ m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward
+ check_anchor_order(m)
+ m.anchors /= m.stride.view(-1, 1, 1)
+ self.stride = m.stride
+ self._initialize_biases() # only run once
+
+ # Init weights, biases
+ initialize_weights(self)
+ self.info()
+ LOGGER.info('')
+
+ def forward(self, x, augment=False, profile=False, visualize=False):
+ if augment:
+ return self._forward_augment(x) # augmented inference, None
+ return self._forward_once(x, profile, visualize) # single-scale inference, train
+
+ def _forward_augment(self, x):
+ img_size = x.shape[-2:] # height, width
+ s = [1, 0.83, 0.67] # scales
+ f = [None, 3, None] # flips (2-ud, 3-lr)
+ y = [] # outputs
+ for si, fi in zip(s, f):
+ xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
+ yi = self._forward_once(xi)[0] # forward
+ # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
+ yi = self._descale_pred(yi, fi, si, img_size)
+ y.append(yi)
+ y = self._clip_augmented(y) # clip augmented tails
+ return torch.cat(y, 1), None # augmented inference, train
+
+ def _descale_pred(self, p, flips, scale, img_size):
+ # de-scale predictions following augmented inference (inverse operation)
+ if self.inplace:
+ p[..., :4] /= scale # de-scale
+ if flips == 2:
+ p[..., 1] = img_size[0] - p[..., 1] # de-flip ud
+ elif flips == 3:
+ p[..., 0] = img_size[1] - p[..., 0] # de-flip lr
+ else:
+ x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale
+ if flips == 2:
+ y = img_size[0] - y # de-flip ud
+ elif flips == 3:
+ x = img_size[1] - x # de-flip lr
+ p = torch.cat((x, y, wh, p[..., 4:]), -1)
+ return p
+
+ def _clip_augmented(self, y):
+ # Clip YOLOv5 augmented inference tails
+ nl = self.model[-1].nl # number of detection layers (P3-P5)
+ g = sum(4 ** x for x in range(nl)) # grid points
+ e = 1 # exclude layer count
+ i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices
+ y[0] = y[0][:, :-i] # large
+ i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices
+ y[-1] = y[-1][:, i:] # small
+ return y
+
+ def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
+ # https://arxiv.org/abs/1708.02002 section 3.3
+ # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
+ m = self.model[-1] # Detect() module
+ for mi, s in zip(m.m, m.stride): # from
+ b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
+ b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
+ b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls
+ mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+
+Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility
+
+
+class SegmentationModel(DetectionModel):
+ # YOLOv5 segmentation model
+ def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):
+ super().__init__(cfg, ch, nc, anchors)
+
+
+class ClassificationModel(BaseModel):
+ # YOLOv5 classification model
+ def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index
+ super().__init__()
+ self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)
+
+ def _from_detection_model(self, model, nc=1000, cutoff=10):
+ # Create a YOLOv5 classification model from a YOLOv5 detection model
+ if isinstance(model, DetectMultiBackend):
+ model = model.model # unwrap DetectMultiBackend
+ model.model = model.model[:cutoff] # backbone
+ m = model.model[-1] # last layer
+ ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module
+ c = Classify(ch, nc) # Classify()
+ c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type
+ model.model[-1] = c # replace
+ self.model = model.model
+ self.stride = model.stride
+ self.save = []
+ self.nc = nc
+
+ def _from_yaml(self, cfg):
+ # Create a YOLOv5 classification model from a *.yaml file
+ self.model = None
+
+
+def parse_model(d, ch): # model_dict, input_channels(3)
+ # Parse a YOLOv5 model.yaml dictionary
+ LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
+ anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
+ if act:
+ Conv.default_act = eval(act) # redefine default activation, i.e. Conv.default_act = nn.SiLU()
+ LOGGER.info(f"{colorstr('activation:')} {act}") # print
+ na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
+ no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
+
+ layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
+ for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
+ m = eval(m) if isinstance(m, str) else m # eval strings
+ for j, a in enumerate(args):
+ with contextlib.suppress(NameError):
+ args[j] = eval(a) if isinstance(a, str) else a # eval strings
+
+ n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain
+ if m in {
+ Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
+ BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
+ c1, c2 = ch[f], args[0]
+ if c2 != no: # if not output
+ c2 = make_divisible(c2 * gw, 8)
+
+ args = [c1, c2, *args[1:]]
+ if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:
+ args.insert(2, n) # number of repeats
+ n = 1
+ elif m is nn.BatchNorm2d:
+ args = [ch[f]]
+ elif m is Concat:
+ c2 = sum(ch[x] for x in f)
+ # TODO: channel, gw, gd
+ elif m in {Detect, Segment}:
+ args.append([ch[x] for x in f])
+ if isinstance(args[1], int): # number of anchors
+ args[1] = [list(range(args[1] * 2))] * len(f)
+ if m is Segment:
+ args[3] = make_divisible(args[3] * gw, 8)
+ elif m is Contract:
+ c2 = ch[f] * args[0] ** 2
+ elif m is Expand:
+ c2 = ch[f] // args[0] ** 2
+ else:
+ c2 = ch[f]
+
+ m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
+ t = str(m)[8:-2].replace('__main__.', '') # module type
+ np = sum(x.numel() for x in m_.parameters()) # number params
+ m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
+ LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print
+ save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
+ layers.append(m_)
+ if i == 0:
+ ch = []
+ ch.append(c2)
+ return nn.Sequential(*layers), sorted(save)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
+ parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--profile', action='store_true', help='profile model speed')
+ parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
+ parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
+ opt = parser.parse_args()
+ opt.cfg = check_yaml(opt.cfg) # check YAML
+ print_args(vars(opt))
+ device = select_device(opt.device)
+
+ # Create model
+ im = torch.rand(opt.batch_size, 3, 640, 640).to(device)
+ model = Model(opt.cfg).to(device)
+
+ # Options
+ if opt.line_profile: # profile layer by layer
+ model(im, profile=True)
+
+ elif opt.profile: # profile forward-backward
+ results = profile(input=im, ops=[model], n=3)
+
+ elif opt.test: # test all models
+ for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):
+ try:
+ _ = Model(cfg)
+ except Exception as e:
+ print(f'Error in {cfg}: {e}')
+
+ else: # report fused model summary
+ model.fuse()
diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ce8a5de46a2785f5537c09fe27f3077c057bb4f3
--- /dev/null
+++ b/models/yolov5l.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ad13ab370ff6532931284a0193959afba214f6f4
--- /dev/null
+++ b/models/yolov5m.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 0.67 # model depth multiple
+width_multiple: 0.75 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/yolov5n.yaml b/models/yolov5n.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a28a40d6e20383727da1a9eed180c9e13ee89fd
--- /dev/null
+++ b/models/yolov5n.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.25 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f35beabb1e1c76f9ec2cad0cb7adbce76f6b7c4c
--- /dev/null
+++ b/models/yolov5s.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.50 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f617a027d8a20a2b7c2a4b415da0941c02aeb3a3
--- /dev/null
+++ b/models/yolov5x.yaml
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+# Parameters
+nc: 80 # number of classes
+depth_multiple: 1.33 # model depth multiple
+width_multiple: 1.25 # layer channel multiple
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 6, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 3, C3, [1024]],
+ [-1, 1, SPPF, [1024, 5]], # 9
+ ]
+
+# YOLOv5 v6.0 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/python/README.md b/python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..84d362e48aefb1abf08483ea1b2fdb53ad3e7a47
--- /dev/null
+++ b/python/README.md
@@ -0,0 +1,40 @@
+This SDK is developed using C++11 and comes with Python wrappers generated using SWIG.
+
+There is no Python extension in the repository. Generating a pre-built extension would force us to choose a specific Python version, which means you'd be bound to this decision.
+To avoid being bound to a specific Python version we let the extension build task up to you. Building the extension is very easy and doesn't require any specific skill.
+
+# C++ Compiler #
+You'll need a C++ compiler.
+
+On Windows we recommend Visual Studio 2015 Community or later.
+
+On Linux we recommend recent GCC/G++ version with support for C++11.
+
+# Building #
+You'll need Python, all versions are supported but ***we highly recommend 3.0 or later***. You'll also need **Cython** and **python-dev** packages. Other packages may be required but you can easily install them using **pip tool**.
+
+On Windows we recommend using Anaconda.
+
+Before building the extension you have to navigate to the folder containing the [binaries](../binaries):
+```
+cd ultimateALPR-SDK/binaries/<>/<>
+```
+For example:
+ - On Windows x86_64: [binaries/windows/x86_64](../binaries/windows/x86_64)
+ - On Linux x86_64: [binaries/linux/x86_64](../binaries/linux/x86_64)
+ - On Linux aarch64: [binaries/linux/aarch64](../binaries/linux/aarch64)
+ - On Raspbian arm32 : [binaries/raspbian/armv7l](../binaries/raspbian/armv7l)
+ - ... you got the idea
+
+ From the binaries folder (`ultimateALPR-SDK/binaries/<>/<>`), call the [setup.py](setup.py) script:
+ ```
+ python ../../../python/setup.py build_ext --inplace -v
+ ```
+ If you have multiple python versions installed, then you may use a virtual env, a full path to the executable... Also, you may use `python3` instead of `python` to make sure you're using version 3. The python version used to build the extension should be the same as the one running the samples.
+
+ This will build and install the extension in the current folder. The extension is named **_ultimateAlpr.pyd** on Windows and **_ultimateAlpr.so** on Linux.
+
+ Now you're ready to run Python scripts using the extension. We highly recommend checking the recognizer sample: [ultimateALPR-SDK/samples/python/recognizer](../samples/python/recognizer)
+
+ # Known issues #
+ If you get `TypeError: super() takes at least 1 argument (0 given)` error message, then make sure you're using Python 3. We tested the code on version **3.6.9** (Windows 8), **3.6.8** (Ubuntu 18) and **3.7.3** (Raspbian Buster). Run `python --version` to print your Python version. You may use `python3` instead of `python` to make sure you're using version 3.
diff --git a/python/setup.py b/python/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4b69299c1358f30d62716f0d63bfef3d54dc2b8
--- /dev/null
+++ b/python/setup.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+"""
+Setup script for building the SWIG-generated ultimateAlprSdk Python extension.
+You must run this file from 'binaries/os/arch' (e.g. 'binaries/windows/x86_64') folder.
+"""
+
+from distutils.core import setup, Extension
+from distutils import sysconfig
+from Cython.Distutils import build_ext
+from sys import platform
+import os
+
+# Name of the prebuilt ultimateALPR SDK shared library to link against (differs on Windows).
+print("Your platform: %s" % platform)
+LIBNAME = 'ultimate_alpr-sdk'
+if platform.startswith('win'):
+ LIBNAME = 'ultimateALPR-SDK'
+
+# build_ext subclass that drops the platform/ABI suffix (e.g. 'cp36-win_amd64') from the built extension filename.
+class NoSuffixBuilder(build_ext):
+ def get_ext_filename(self, ext_name):
+ filename = super().get_ext_filename(ext_name)
+ suffix = sysconfig.get_config_var('EXT_SUFFIX')
+ ext = os.path.splitext(filename)[1]
+ return filename.replace(suffix, "") + ext
+
+ultimateAlprSdk_module = Extension('_ultimateAlprSdk',
+ sources=[os.path.abspath('../../../python/ultimateALPR-SDK-API-PUBLIC-SWIG_python.cxx')],
+ include_dirs=['../../../c++'],
+ language='c++11',
+ library_dirs=['.'],
+ libraries=[LIBNAME]
+ )
+
+setup (name = 'ultimateAlprSdk',
+ version = '3.0.0',
+ author = "Doubango AI",
+ description = """ultimateAlprSdk for python""",
+ ext_modules = [ultimateAlprSdk_module],
+ py_modules = ["ultimateAlprSdk"],
+ cmdclass={"build_ext": NoSuffixBuilder},
+ )
\ No newline at end of file
diff --git a/python/ultimateALPR-SDK-API-PUBLIC-SWIG_python.cxx b/python/ultimateALPR-SDK-API-PUBLIC-SWIG_python.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..afbf38a156a29e6932c5f25c7e978e8dd1f3579e
--- /dev/null
+++ b/python/ultimateALPR-SDK-API-PUBLIC-SWIG_python.cxx
@@ -0,0 +1,6497 @@
+/* ----------------------------------------------------------------------------
+ * This file was automatically generated by SWIG (http://www.swig.org).
+ * Version 2.0.9
+ *
+ * This file is not intended to be easily readable and contains a number of
+ * coding conventions designed to improve portability and efficiency. Do not make
+ * changes to this file unless you know what you are doing--modify the SWIG
+ * interface file instead.
+ * ----------------------------------------------------------------------------- */
+
+#define SWIGPYTHON
+#define SWIG_DIRECTORS
+#define SWIG_PYTHON_DIRECTOR_NO_VTABLE
+
+
+#ifdef __cplusplus
+/* SwigValueWrapper is described in swig.swg */
+template class SwigValueWrapper {
+ struct SwigMovePointer {
+ T *ptr;
+ SwigMovePointer(T *p) : ptr(p) { }
+ ~SwigMovePointer() { delete ptr; }
+ SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; }
+ } pointer;
+ SwigValueWrapper& operator=(const SwigValueWrapper& rhs);
+ SwigValueWrapper(const SwigValueWrapper& rhs);
+public:
+ SwigValueWrapper() : pointer(0) { }
+ SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; }
+ operator T&() const { return *pointer.ptr; }
+ T *operator&() { return pointer.ptr; }
+};
+
+template T SwigValueInit() {
+ return T();
+}
+#endif
+
+/* -----------------------------------------------------------------------------
+ * This section contains generic SWIG labels for method/variable
+ * declarations/attributes, and other compiler dependent labels.
+ * ----------------------------------------------------------------------------- */
+
+/* template workaround for compilers that cannot correctly implement the C++ standard */
+#ifndef SWIGTEMPLATEDISAMBIGUATOR
+# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560)
+# define SWIGTEMPLATEDISAMBIGUATOR template
+# elif defined(__HP_aCC)
+/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */
+/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */
+# define SWIGTEMPLATEDISAMBIGUATOR template
+# else
+# define SWIGTEMPLATEDISAMBIGUATOR
+# endif
+#endif
+
+/* inline attribute */
+#ifndef SWIGINLINE
+# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__))
+# define SWIGINLINE inline
+# else
+# define SWIGINLINE
+# endif
+#endif
+
+/* attribute recognised by some compilers to avoid 'unused' warnings */
+#ifndef SWIGUNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define SWIGUNUSED __attribute__ ((__unused__))
+# else
+# define SWIGUNUSED
+# endif
+# elif defined(__ICC)
+# define SWIGUNUSED __attribute__ ((__unused__))
+# else
+# define SWIGUNUSED
+# endif
+#endif
+
+#ifndef SWIG_MSC_UNSUPPRESS_4505
+# if defined(_MSC_VER)
+# pragma warning(disable : 4505) /* unreferenced local function has been removed */
+# endif
+#endif
+
+#ifndef SWIGUNUSEDPARM
+# ifdef __cplusplus
+# define SWIGUNUSEDPARM(p)
+# else
+# define SWIGUNUSEDPARM(p) p SWIGUNUSED
+# endif
+#endif
+
+/* internal SWIG method */
+#ifndef SWIGINTERN
+# define SWIGINTERN static SWIGUNUSED
+#endif
+
+/* internal inline SWIG method */
+#ifndef SWIGINTERNINLINE
+# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE
+#endif
+
+/* exporting methods */
+#if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+# ifndef GCC_HASCLASSVISIBILITY
+# define GCC_HASCLASSVISIBILITY
+# endif
+#endif
+
+#ifndef SWIGEXPORT
+# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
+# if defined(STATIC_LINKED)
+# define SWIGEXPORT
+# else
+# define SWIGEXPORT __declspec(dllexport)
+# endif
+# else
+# if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY)
+# define SWIGEXPORT __attribute__ ((visibility("default")))
+# else
+# define SWIGEXPORT
+# endif
+# endif
+#endif
+
+/* calling conventions for Windows */
+#ifndef SWIGSTDCALL
+# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
+# define SWIGSTDCALL __stdcall
+# else
+# define SWIGSTDCALL
+# endif
+#endif
+
+/* Deal with Microsoft's attempt at deprecating C standard runtime functions */
+#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE)
+# define _CRT_SECURE_NO_DEPRECATE
+#endif
+
+/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */
+#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE)
+# define _SCL_SECURE_NO_DEPRECATE
+#endif
+
+
+
+/* Python.h has to appear first */
+#include
+
+/* -----------------------------------------------------------------------------
+ * swigrun.swg
+ *
+ * This file contains generic C API SWIG runtime support for pointer
+ * type checking.
+ * ----------------------------------------------------------------------------- */
+
+/* This should only be incremented when either the layout of swig_type_info changes,
+ or for whatever reason, the runtime changes incompatibly */
+#define SWIG_RUNTIME_VERSION "4"
+
+/* define SWIG_TYPE_TABLE_NAME as "SWIG_TYPE_TABLE" */
+#ifdef SWIG_TYPE_TABLE
+# define SWIG_QUOTE_STRING(x) #x
+# define SWIG_EXPAND_AND_QUOTE_STRING(x) SWIG_QUOTE_STRING(x)
+# define SWIG_TYPE_TABLE_NAME SWIG_EXPAND_AND_QUOTE_STRING(SWIG_TYPE_TABLE)
+#else
+# define SWIG_TYPE_TABLE_NAME
+#endif
+
+/*
+ You can use the SWIGRUNTIME and SWIGRUNTIMEINLINE macros for
+ creating a static or dynamic library from the SWIG runtime code.
+ In 99.9% of the cases, SWIG just needs to declare them as 'static'.
+
+ But only do this if strictly necessary, ie, if you have problems
+ with your compiler or suchlike.
+*/
+
+#ifndef SWIGRUNTIME
+# define SWIGRUNTIME SWIGINTERN
+#endif
+
+#ifndef SWIGRUNTIMEINLINE
+# define SWIGRUNTIMEINLINE SWIGRUNTIME SWIGINLINE
+#endif
+
+/* Generic buffer size */
+#ifndef SWIG_BUFFER_SIZE
+# define SWIG_BUFFER_SIZE 1024
+#endif
+
+/* Flags for pointer conversions */
+#define SWIG_POINTER_DISOWN 0x1
+#define SWIG_CAST_NEW_MEMORY 0x2
+
+/* Flags for new pointer objects */
+#define SWIG_POINTER_OWN 0x1
+
+
+/*
+ Flags/methods for returning states.
+
+ The SWIG conversion methods, as ConvertPtr, return an integer
+ that tells if the conversion was successful or not. And if not,
+ an error code can be returned (see swigerrors.swg for the codes).
+
+ Use the following macros/flags to set or process the returning
+ states.
+
+ In old versions of SWIG, code such as the following was usually written:
+
+ if (SWIG_ConvertPtr(obj,vptr,ty.flags) != -1) {
+ // success code
+ } else {
+ //fail code
+ }
+
+ Now you can be more explicit:
+
+ int res = SWIG_ConvertPtr(obj,vptr,ty.flags);
+ if (SWIG_IsOK(res)) {
+ // success code
+ } else {
+ // fail code
+ }
+
+ which is the same really, but now you can also do
+
+ Type *ptr;
+ int res = SWIG_ConvertPtr(obj,(void **)(&ptr),ty.flags);
+ if (SWIG_IsOK(res)) {
+ // success code
+ if (SWIG_IsNewObj(res) {
+ ...
+ delete *ptr;
+ } else {
+ ...
+ }
+ } else {
+ // fail code
+ }
+
+ I.e., now SWIG_ConvertPtr can return new objects and you can
+ identify the case and take care of the deallocation. Of course that
+ also requires SWIG_ConvertPtr to return new result values, such as
+
+ int SWIG_ConvertPtr(obj, ptr,...) {
+ if () {
+ if () {
+ *ptr = ;
+ return SWIG_NEWOBJ;
+ } else {
+ *ptr = ;
+ return SWIG_OLDOBJ;
+ }
+ } else {
+ return SWIG_BADOBJ;
+ }
+ }
+
+ Of course, returning the plain '0(success)/-1(fail)' still works, but you can be
+ more explicit by returning SWIG_BADOBJ, SWIG_ERROR or any of the
+ SWIG errors code.
+
+ Finally, if the SWIG_CASTRANK_MODE is enabled, the result code
+ allows to return the 'cast rank', for example, if you have this
+
+ int food(double)
+ int fooi(int);
+
+ and you call
+
+ food(1) // cast rank '1' (1 -> 1.0)
+ fooi(1) // cast rank '0'
+
+ just use the SWIG_AddCast()/SWIG_CheckState()
+*/
+
+#define SWIG_OK (0)
+#define SWIG_ERROR (-1)
+#define SWIG_IsOK(r) (r >= 0)
+#define SWIG_ArgError(r) ((r != SWIG_ERROR) ? r : SWIG_TypeError)
+
+/* The CastRankLimit says how many bits are used for the cast rank */
+#define SWIG_CASTRANKLIMIT (1 << 8)
+/* The NewMask denotes the object was created (using new/malloc) */
+#define SWIG_NEWOBJMASK (SWIG_CASTRANKLIMIT << 1)
+/* The TmpMask is for in/out typemaps that use temporal objects */
+#define SWIG_TMPOBJMASK (SWIG_NEWOBJMASK << 1)
+/* Simple returning values */
+#define SWIG_BADOBJ (SWIG_ERROR)
+#define SWIG_OLDOBJ (SWIG_OK)
+#define SWIG_NEWOBJ (SWIG_OK | SWIG_NEWOBJMASK)
+#define SWIG_TMPOBJ (SWIG_OK | SWIG_TMPOBJMASK)
+/* Check, add and del mask methods */
+#define SWIG_AddNewMask(r) (SWIG_IsOK(r) ? (r | SWIG_NEWOBJMASK) : r)
+#define SWIG_DelNewMask(r) (SWIG_IsOK(r) ? (r & ~SWIG_NEWOBJMASK) : r)
+#define SWIG_IsNewObj(r) (SWIG_IsOK(r) && (r & SWIG_NEWOBJMASK))
+#define SWIG_AddTmpMask(r) (SWIG_IsOK(r) ? (r | SWIG_TMPOBJMASK) : r)
+#define SWIG_DelTmpMask(r) (SWIG_IsOK(r) ? (r & ~SWIG_TMPOBJMASK) : r)
+#define SWIG_IsTmpObj(r) (SWIG_IsOK(r) && (r & SWIG_TMPOBJMASK))
+
+/* Cast-Rank Mode */
+#if defined(SWIG_CASTRANK_MODE)
+# ifndef SWIG_TypeRank
+# define SWIG_TypeRank unsigned long
+# endif
+# ifndef SWIG_MAXCASTRANK /* Default cast allowed */
+# define SWIG_MAXCASTRANK (2)
+# endif
+# define SWIG_CASTRANKMASK ((SWIG_CASTRANKLIMIT) -1)
+# define SWIG_CastRank(r) (r & SWIG_CASTRANKMASK)
+SWIGINTERNINLINE int SWIG_AddCast(int r) {
+ return SWIG_IsOK(r) ? ((SWIG_CastRank(r) < SWIG_MAXCASTRANK) ? (r + 1) : SWIG_ERROR) : r;
+}
+SWIGINTERNINLINE int SWIG_CheckState(int r) {
+ return SWIG_IsOK(r) ? SWIG_CastRank(r) + 1 : 0;
+}
+#else /* no cast-rank mode */
+# define SWIG_AddCast
+# define SWIG_CheckState(r) (SWIG_IsOK(r) ? 1 : 0)
+#endif
+
+
+#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef void *(*swig_converter_func)(void *, int *);
+typedef struct swig_type_info *(*swig_dycast_func)(void **);
+
+/* Structure to store information on one type */
+typedef struct swig_type_info {
+ const char *name; /* mangled name of this type */
+ const char *str; /* human readable name of this type */
+ swig_dycast_func dcast; /* dynamic cast function down a hierarchy */
+ struct swig_cast_info *cast; /* linked list of types that can cast into this type */
+ void *clientdata; /* language specific type data */
+ int owndata; /* flag if the structure owns the clientdata */
+} swig_type_info;
+
+/* Structure to store a type and conversion function used for casting */
+typedef struct swig_cast_info {
+ swig_type_info *type; /* pointer to type that is equivalent to this type */
+ swig_converter_func converter; /* function to cast the void pointers */
+ struct swig_cast_info *next; /* pointer to next cast in linked list */
+ struct swig_cast_info *prev; /* pointer to the previous cast */
+} swig_cast_info;
+
+/* Structure used to store module information
+ * Each module generates one structure like this, and the runtime collects
+ * all of these structures and stores them in a circularly linked list.*/
+typedef struct swig_module_info {
+ swig_type_info **types; /* Array of pointers to swig_type_info structures that are in this module */
+ size_t size; /* Number of types in this module */
+ struct swig_module_info *next; /* Pointer to next element in circularly linked list */
+ swig_type_info **type_initial; /* Array of initially generated type structures */
+ swig_cast_info **cast_initial; /* Array of initially generated casting structures */
+ void *clientdata; /* Language specific module data */
+} swig_module_info;
+
+/*
+ Compare two type names skipping the space characters, therefore
+ "char*" == "char *" and "Class" == "Class", etc.
+
+ Return 0 when the two name types are equivalent, as in
+ strncmp, but skipping ' '.
+*/
+SWIGRUNTIME int
+SWIG_TypeNameComp(const char *f1, const char *l1,
+ const char *f2, const char *l2) {
+ for (;(f1 != l1) && (f2 != l2); ++f1, ++f2) {
+ while ((*f1 == ' ') && (f1 != l1)) ++f1;
+ while ((*f2 == ' ') && (f2 != l2)) ++f2;
+ if (*f1 != *f2) return (*f1 > *f2) ? 1 : -1;
+ }
+ return (int)((l1 - f1) - (l2 - f2));
+}
+
+/*
+ Check type equivalence in a name list like ||...
+ Return 0 if not equal, 1 if equal
+*/
+SWIGRUNTIME int
+SWIG_TypeEquiv(const char *nb, const char *tb) {
+ int equiv = 0;
+ const char* te = tb + strlen(tb);
+ const char* ne = nb;
+ while (!equiv && *ne) {
+ for (nb = ne; *ne; ++ne) {
+ if (*ne == '|') break;
+ }
+ equiv = (SWIG_TypeNameComp(nb, ne, tb, te) == 0) ? 1 : 0;
+ if (*ne) ++ne;
+ }
+ return equiv;
+}
+
+/*
+ Check type equivalence in a name list like ||...
+ Return 0 if equal, -1 if nb < tb, 1 if nb > tb
+*/
+SWIGRUNTIME int
+SWIG_TypeCompare(const char *nb, const char *tb) {
+ int equiv = 0;
+ const char* te = tb + strlen(tb);
+ const char* ne = nb;
+ while (!equiv && *ne) {
+ for (nb = ne; *ne; ++ne) {
+ if (*ne == '|') break;
+ }
+ equiv = (SWIG_TypeNameComp(nb, ne, tb, te) == 0) ? 1 : 0;
+ if (*ne) ++ne;
+ }
+ return equiv;
+}
+
+
+/*
+ Check the typename
+*/
+SWIGRUNTIME swig_cast_info *
+SWIG_TypeCheck(const char *c, swig_type_info *ty) {
+ if (ty) {
+ swig_cast_info *iter = ty->cast;
+ while (iter) {
+ if (strcmp(iter->type->name, c) == 0) {
+ if (iter == ty->cast)
+ return iter;
+ /* Move iter to the top of the linked list */
+ iter->prev->next = iter->next;
+ if (iter->next)
+ iter->next->prev = iter->prev;
+ iter->next = ty->cast;
+ iter->prev = 0;
+ if (ty->cast) ty->cast->prev = iter;
+ ty->cast = iter;
+ return iter;
+ }
+ iter = iter->next;
+ }
+ }
+ return 0;
+}
+
+/*
+ Identical to SWIG_TypeCheck, except strcmp is replaced with a pointer comparison
+*/
+SWIGRUNTIME swig_cast_info *
+SWIG_TypeCheckStruct(swig_type_info *from, swig_type_info *ty) {
+ if (ty) {
+ swig_cast_info *iter = ty->cast;
+ while (iter) {
+ if (iter->type == from) {
+ if (iter == ty->cast)
+ return iter;
+ /* Move iter to the top of the linked list */
+ iter->prev->next = iter->next;
+ if (iter->next)
+ iter->next->prev = iter->prev;
+ iter->next = ty->cast;
+ iter->prev = 0;
+ if (ty->cast) ty->cast->prev = iter;
+ ty->cast = iter;
+ return iter;
+ }
+ iter = iter->next;
+ }
+ }
+ return 0;
+}
+
+/*
+ Cast a pointer up an inheritance hierarchy
+*/
+SWIGRUNTIMEINLINE void *
+SWIG_TypeCast(swig_cast_info *ty, void *ptr, int *newmemory) {
+ return ((!ty) || (!ty->converter)) ? ptr : (*ty->converter)(ptr, newmemory);
+}
+
+/*
+ Dynamic pointer casting. Down an inheritance hierarchy
+*/
+SWIGRUNTIME swig_type_info *
+SWIG_TypeDynamicCast(swig_type_info *ty, void **ptr) {
+ swig_type_info *lastty = ty;
+ if (!ty || !ty->dcast) return ty;
+ while (ty && (ty->dcast)) {
+ ty = (*ty->dcast)(ptr);
+ if (ty) lastty = ty;
+ }
+ return lastty;
+}
+
+/*
+ Return the name associated with this type
+*/
+SWIGRUNTIMEINLINE const char *
+SWIG_TypeName(const swig_type_info *ty) {
+ return ty->name;
+}
+
+/*
+ Return the pretty name associated with this type,
+ that is an unmangled type name in a form presentable to the user.
+*/
+SWIGRUNTIME const char *
+SWIG_TypePrettyName(const swig_type_info *type) {
+ /* The "str" field contains the equivalent pretty names of the
+ type, separated by vertical-bar characters. We choose
+ to print the last name, as it is often (?) the most
+ specific. */
+ if (!type) return NULL;
+ if (type->str != NULL) {
+ const char *last_name = type->str;
+ const char *s;
+ for (s = type->str; *s; s++)
+ if (*s == '|') last_name = s+1;
+ return last_name;
+ }
+ else
+ return type->name;
+}
+
+/*
+ Set the clientdata field for a type
+*/
+SWIGRUNTIME void
+SWIG_TypeClientData(swig_type_info *ti, void *clientdata) {
+ swig_cast_info *cast = ti->cast;
+ /* if (ti->clientdata == clientdata) return; */
+ ti->clientdata = clientdata;
+
+ while (cast) {
+ if (!cast->converter) {
+ swig_type_info *tc = cast->type;
+ if (!tc->clientdata) {
+ SWIG_TypeClientData(tc, clientdata);
+ }
+ }
+ cast = cast->next;
+ }
+}
+SWIGRUNTIME void
+SWIG_TypeNewClientData(swig_type_info *ti, void *clientdata) {
+ SWIG_TypeClientData(ti, clientdata);
+ ti->owndata = 1;
+}
+
+/*
+ Search for a swig_type_info structure only by mangled name
+ Search is a O(log #types)
+
+ We start searching at module start, and finish searching when start == end.
+ Note: if start == end at the beginning of the function, we go all the way around
+ the circular list.
+*/
+SWIGRUNTIME swig_type_info *
+SWIG_MangledTypeQueryModule(swig_module_info *start,
+ swig_module_info *end,
+ const char *name) {
+ swig_module_info *iter = start;
+ do {
+ if (iter->size) {
+ register size_t l = 0;
+ register size_t r = iter->size - 1;
+ do {
+ /* since l+r >= 0, we can (>> 1) instead (/ 2) */
+ register size_t i = (l + r) >> 1;
+ const char *iname = iter->types[i]->name;
+ if (iname) {
+ register int compare = strcmp(name, iname);
+ if (compare == 0) {
+ return iter->types[i];
+ } else if (compare < 0) {
+ if (i) {
+ r = i - 1;
+ } else {
+ break;
+ }
+ } else if (compare > 0) {
+ l = i + 1;
+ }
+ } else {
+ break; /* should never happen */
+ }
+ } while (l <= r);
+ }
+ iter = iter->next;
+ } while (iter != end);
+ return 0;
+}
+
+/*
+ Search for a swig_type_info structure for either a mangled name or a human readable name.
+ It first searches the mangled names of the types, which is a O(log #types)
+ If a type is not found it then searches the human readable names, which is O(#types).
+
+ We start searching at module start, and finish searching when start == end.
+ Note: if start == end at the beginning of the function, we go all the way around
+ the circular list.
+*/
+SWIGRUNTIME swig_type_info *
+SWIG_TypeQueryModule(swig_module_info *start,
+ swig_module_info *end,
+ const char *name) {
+ /* STEP 1: Search the name field using binary search */
+ swig_type_info *ret = SWIG_MangledTypeQueryModule(start, end, name);
+ if (ret) {
+ return ret;
+ } else {
+ /* STEP 2: If the type hasn't been found, do a complete search
+ of the str field (the human readable name) */
+ swig_module_info *iter = start;
+ do {
+ register size_t i = 0;
+ for (; i < iter->size; ++i) {
+ if (iter->types[i]->str && (SWIG_TypeEquiv(iter->types[i]->str, name)))
+ return iter->types[i];
+ }
+ iter = iter->next;
+ } while (iter != end);
+ }
+
+ /* neither found a match */
+ return 0;
+}
+
+/*
+ Pack binary data into a string
+*/
+SWIGRUNTIME char *
+SWIG_PackData(char *c, void *ptr, size_t sz) {
+ static const char hex[17] = "0123456789abcdef";
+ register const unsigned char *u = (unsigned char *) ptr;
+ register const unsigned char *eu = u + sz;
+ for (; u != eu; ++u) {
+ register unsigned char uu = *u;
+ *(c++) = hex[(uu & 0xf0) >> 4];
+ *(c++) = hex[uu & 0xf];
+ }
+ return c;
+}
+
+/*
+ Unpack binary data from a string
+*/
+SWIGRUNTIME const char *
+SWIG_UnpackData(const char *c, void *ptr, size_t sz) {
+ register unsigned char *u = (unsigned char *) ptr;
+ register const unsigned char *eu = u + sz;
+ for (; u != eu; ++u) {
+ register char d = *(c++);
+ register unsigned char uu;
+ if ((d >= '0') && (d <= '9'))
+ uu = ((d - '0') << 4);
+ else if ((d >= 'a') && (d <= 'f'))
+ uu = ((d - ('a'-10)) << 4);
+ else
+ return (char *) 0;
+ d = *(c++);
+ if ((d >= '0') && (d <= '9'))
+ uu |= (d - '0');
+ else if ((d >= 'a') && (d <= 'f'))
+ uu |= (d - ('a'-10));
+ else
+ return (char *) 0;
+ *u = uu;
+ }
+ return c;
+}
+
+/*
+ Pack 'void *' into a string buffer.
+*/
+SWIGRUNTIME char *
+SWIG_PackVoidPtr(char *buff, void *ptr, const char *name, size_t bsz) {
+ char *r = buff;
+ if ((2*sizeof(void *) + 2) > bsz) return 0;
+ *(r++) = '_';
+ r = SWIG_PackData(r,&ptr,sizeof(void *));
+ if (strlen(name) + 1 > (bsz - (r - buff))) return 0;
+ strcpy(r,name);
+ return buff;
+}
+
+SWIGRUNTIME const char *
+SWIG_UnpackVoidPtr(const char *c, void **ptr, const char *name) {
+ if (*c != '_') {
+ if (strcmp(c,"NULL") == 0) {
+ *ptr = (void *) 0;
+ return name;
+ } else {
+ return 0;
+ }
+ }
+ return SWIG_UnpackData(++c,ptr,sizeof(void *));
+}
+
+SWIGRUNTIME char *
+SWIG_PackDataName(char *buff, void *ptr, size_t sz, const char *name, size_t bsz) {
+ char *r = buff;
+ size_t lname = (name ? strlen(name) : 0);
+ if ((2*sz + 2 + lname) > bsz) return 0;
+ *(r++) = '_';
+ r = SWIG_PackData(r,ptr,sz);
+ if (lname) {
+ strncpy(r,name,lname+1);
+ } else {
+ *r = 0;
+ }
+ return buff;
+}
+
+SWIGRUNTIME const char *
+SWIG_UnpackDataName(const char *c, void *ptr, size_t sz, const char *name) {
+ if (*c != '_') {
+ if (strcmp(c,"NULL") == 0) {
+ memset(ptr,0,sz);
+ return name;
+ } else {
+ return 0;
+ }
+ }
+ return SWIG_UnpackData(++c,ptr,sz);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Errors in SWIG */
+#define SWIG_UnknownError -1
+#define SWIG_IOError -2
+#define SWIG_RuntimeError -3
+#define SWIG_IndexError -4
+#define SWIG_TypeError -5
+#define SWIG_DivisionByZero -6
+#define SWIG_OverflowError -7
+#define SWIG_SyntaxError -8
+#define SWIG_ValueError -9
+#define SWIG_SystemError -10
+#define SWIG_AttributeError -11
+#define SWIG_MemoryError -12
+#define SWIG_NullReferenceError -13
+
+
+
+/* Compatibility macros for Python 3 */
+#if PY_VERSION_HEX >= 0x03000000
+
+#define PyClass_Check(obj) PyObject_IsInstance(obj, (PyObject *)&PyType_Type)
+#define PyInt_Check(x) PyLong_Check(x)
+#define PyInt_AsLong(x) PyLong_AsLong(x)
+#define PyInt_FromLong(x) PyLong_FromLong(x)
+#define PyInt_FromSize_t(x) PyLong_FromSize_t(x)
+#define PyString_Check(name) PyBytes_Check(name)
+#define PyString_FromString(x) PyUnicode_FromString(x)
+#define PyString_Format(fmt, args) PyUnicode_Format(fmt, args)
+#define PyString_AsString(str) PyBytes_AsString(str)
+#define PyString_Size(str) PyBytes_Size(str)
+#define PyString_InternFromString(key) PyUnicode_InternFromString(key)
+#define Py_TPFLAGS_HAVE_CLASS Py_TPFLAGS_BASETYPE
+#define PyString_AS_STRING(x) PyUnicode_AS_STRING(x)
+#define _PyLong_FromSsize_t(x) PyLong_FromSsize_t(x)
+
+#endif
+
+#ifndef Py_TYPE
+# define Py_TYPE(op) ((op)->ob_type)
+#endif
+
+/* SWIG APIs for compatibility of both Python 2 & 3 */
+
+#if PY_VERSION_HEX >= 0x03000000
+# define SWIG_Python_str_FromFormat PyUnicode_FromFormat
+#else
+# define SWIG_Python_str_FromFormat PyString_FromFormat
+#endif
+
+
+/* Warning: This function will allocate a new string in Python 3,
+ * so please call SWIG_Python_str_DelForPy3(x) to free the space.
+ */
+SWIGINTERN char*
+SWIG_Python_str_AsChar(PyObject *str)
+{
+#if PY_VERSION_HEX >= 0x03000000
+ char *cstr;
+ char *newstr;
+ Py_ssize_t len;
+ str = PyUnicode_AsUTF8String(str);
+ PyBytes_AsStringAndSize(str, &cstr, &len);
+ newstr = (char *) malloc(len+1);
+ memcpy(newstr, cstr, len+1);
+ Py_XDECREF(str);
+ return newstr;
+#else
+ return PyString_AsString(str);
+#endif
+}
+
+#if PY_VERSION_HEX >= 0x03000000
+# define SWIG_Python_str_DelForPy3(x) free( (void*) (x) )
+#else
+# define SWIG_Python_str_DelForPy3(x)
+#endif
+
+
+SWIGINTERN PyObject*
+SWIG_Python_str_FromChar(const char *c)
+{
+#if PY_VERSION_HEX >= 0x03000000
+ return PyUnicode_FromString(c);
+#else
+ return PyString_FromString(c);
+#endif
+}
+
+/* Add PyOS_snprintf for old Pythons */
+#if PY_VERSION_HEX < 0x02020000
+# if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM)
+# define PyOS_snprintf _snprintf
+# else
+# define PyOS_snprintf snprintf
+# endif
+#endif
+
+/* A crude PyString_FromFormat implementation for old Pythons */
+#if PY_VERSION_HEX < 0x02020000
+
+#ifndef SWIG_PYBUFFER_SIZE
+# define SWIG_PYBUFFER_SIZE 1024
+#endif
+
+static PyObject *
+PyString_FromFormat(const char *fmt, ...) {
+ va_list ap;
+ char buf[SWIG_PYBUFFER_SIZE * 2];
+ int res;
+ va_start(ap, fmt);
+ res = vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+ return (res < 0 || res >= (int)sizeof(buf)) ? 0 : PyString_FromString(buf);
+}
+#endif
+
+/* Add PyObject_Del for old Pythons */
+#if PY_VERSION_HEX < 0x01060000
+# define PyObject_Del(op) PyMem_DEL((op))
+#endif
+#ifndef PyObject_DEL
+# define PyObject_DEL PyObject_Del
+#endif
+
+/* A crude PyExc_StopIteration exception for old Pythons */
+#if PY_VERSION_HEX < 0x02020000
+# ifndef PyExc_StopIteration
+# define PyExc_StopIteration PyExc_RuntimeError
+# endif
+# ifndef PyObject_GenericGetAttr
+# define PyObject_GenericGetAttr 0
+# endif
+#endif
+
+/* Py_NotImplemented is defined in 2.1 and up. */
+#if PY_VERSION_HEX < 0x02010000
+# ifndef Py_NotImplemented
+# define Py_NotImplemented PyExc_RuntimeError
+# endif
+#endif
+
+/* A crude PyString_AsStringAndSize implementation for old Pythons */
+#if PY_VERSION_HEX < 0x02010000
+# ifndef PyString_AsStringAndSize
+# define PyString_AsStringAndSize(obj, s, len) {*s = PyString_AsString(obj); *len = *s ? strlen(*s) : 0;}
+# endif
+#endif
+
+/* PySequence_Size for old Pythons */
+#if PY_VERSION_HEX < 0x02000000
+# ifndef PySequence_Size
+# define PySequence_Size PySequence_Length
+# endif
+#endif
+
+/* PyBool_FromLong for old Pythons */
+#if PY_VERSION_HEX < 0x02030000
+static
+PyObject *PyBool_FromLong(long ok)
+{
+ PyObject *result = ok ? Py_True : Py_False;
+ Py_INCREF(result);
+ return result;
+}
+#endif
+
+/* Py_ssize_t for old Pythons */
+/* This code is as recommended by: */
+/* http://www.python.org/dev/peps/pep-0353/#conversion-guidelines */
+#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
+typedef int Py_ssize_t;
+# define PY_SSIZE_T_MAX INT_MAX
+# define PY_SSIZE_T_MIN INT_MIN
+typedef inquiry lenfunc;
+typedef intargfunc ssizeargfunc;
+typedef intintargfunc ssizessizeargfunc;
+typedef intobjargproc ssizeobjargproc;
+typedef intintobjargproc ssizessizeobjargproc;
+typedef getreadbufferproc readbufferproc;
+typedef getwritebufferproc writebufferproc;
+typedef getsegcountproc segcountproc;
+typedef getcharbufferproc charbufferproc;
+static long PyNumber_AsSsize_t (PyObject *x, void *SWIGUNUSEDPARM(exc))
+{
+ long result = 0;
+ PyObject *i = PyNumber_Int(x);
+ if (i) {
+ result = PyInt_AsLong(i);
+ Py_DECREF(i);
+ }
+ return result;
+}
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+#define PyInt_FromSize_t(x) PyInt_FromLong((long)x)
+#endif
+
+#if PY_VERSION_HEX < 0x02040000
+#define Py_VISIT(op) \
+ do { \
+ if (op) { \
+ int vret = visit((op), arg); \
+ if (vret) \
+ return vret; \
+ } \
+ } while (0)
+#endif
+
+#if PY_VERSION_HEX < 0x02030000
+typedef struct {
+ PyTypeObject type;
+ PyNumberMethods as_number;
+ PyMappingMethods as_mapping;
+ PySequenceMethods as_sequence;
+ PyBufferProcs as_buffer;
+ PyObject *name, *slots;
+} PyHeapTypeObject;
+#endif
+
+#if PY_VERSION_HEX < 0x02030000
+typedef destructor freefunc;
+#endif
+
+#if ((PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION > 6) || \
+ (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION > 0) || \
+ (PY_MAJOR_VERSION > 3))
+# define SWIGPY_USE_CAPSULE
+# define SWIGPY_CAPSULE_NAME ((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION ".type_pointer_capsule" SWIG_TYPE_TABLE_NAME)
+#endif
+
+#if PY_VERSION_HEX < 0x03020000
+#define PyDescr_TYPE(x) (((PyDescrObject *)(x))->d_type)
+#define PyDescr_NAME(x) (((PyDescrObject *)(x))->d_name)
+#endif
+
+/* -----------------------------------------------------------------------------
+ * error manipulation
+ * ----------------------------------------------------------------------------- */
+
+SWIGRUNTIME PyObject*
+SWIG_Python_ErrorType(int code) {
+ PyObject* type = 0;
+ switch(code) {
+ case SWIG_MemoryError:
+ type = PyExc_MemoryError;
+ break;
+ case SWIG_IOError:
+ type = PyExc_IOError;
+ break;
+ case SWIG_RuntimeError:
+ type = PyExc_RuntimeError;
+ break;
+ case SWIG_IndexError:
+ type = PyExc_IndexError;
+ break;
+ case SWIG_TypeError:
+ type = PyExc_TypeError;
+ break;
+ case SWIG_DivisionByZero:
+ type = PyExc_ZeroDivisionError;
+ break;
+ case SWIG_OverflowError:
+ type = PyExc_OverflowError;
+ break;
+ case SWIG_SyntaxError:
+ type = PyExc_SyntaxError;
+ break;
+ case SWIG_ValueError:
+ type = PyExc_ValueError;
+ break;
+ case SWIG_SystemError:
+ type = PyExc_SystemError;
+ break;
+ case SWIG_AttributeError:
+ type = PyExc_AttributeError;
+ break;
+ default:
+ type = PyExc_RuntimeError;
+ }
+ return type;
+}
+
+
+SWIGRUNTIME void
+SWIG_Python_AddErrorMsg(const char* mesg)
+{
+ PyObject *type = 0;
+ PyObject *value = 0;
+ PyObject *traceback = 0;
+
+ if (PyErr_Occurred()) PyErr_Fetch(&type, &value, &traceback);
+ if (value) {
+ char *tmp;
+ PyObject *old_str = PyObject_Str(value);
+ PyErr_Clear();
+ Py_XINCREF(type);
+
+ PyErr_Format(type, "%s %s", tmp = SWIG_Python_str_AsChar(old_str), mesg);
+ SWIG_Python_str_DelForPy3(tmp);
+ Py_DECREF(old_str);
+ Py_DECREF(value);
+ } else {
+ PyErr_SetString(PyExc_RuntimeError, mesg);
+ }
+}
+
+/* GIL/thread handling configuration: BEGIN_BLOCK acquires the GIL and
+ * BEGIN_ALLOW releases it. With PyGILState (Python >= 2.3) the C++ variants
+ * are RAII helpers whose destructors also run END_*; in plain C the macros
+ * expand to paired PyGILState/PyEval calls; without thread support all of
+ * them are no-ops. */
+#if defined(SWIG_PYTHON_NO_THREADS)
+# if defined(SWIG_PYTHON_THREADS)
+# undef SWIG_PYTHON_THREADS
+# endif
+#endif
+#if defined(SWIG_PYTHON_THREADS) /* Threading support is enabled */
+# if !defined(SWIG_PYTHON_USE_GIL) && !defined(SWIG_PYTHON_NO_USE_GIL)
+# if (PY_VERSION_HEX >= 0x02030000) /* For 2.3 or later, use the PyGILState calls */
+# define SWIG_PYTHON_USE_GIL
+# endif
+# endif
+# if defined(SWIG_PYTHON_USE_GIL) /* Use PyGILState threads calls */
+# ifndef SWIG_PYTHON_INITIALIZE_THREADS
+# define SWIG_PYTHON_INITIALIZE_THREADS PyEval_InitThreads()
+# endif
+# ifdef __cplusplus /* C++ code */
+ /* RAII: acquires the GIL on construction, releases on end()/destruction. */
+ class SWIG_Python_Thread_Block {
+ bool status;
+ PyGILState_STATE state;
+ public:
+ void end() { if (status) { PyGILState_Release(state); status = false;} }
+ SWIG_Python_Thread_Block() : status(true), state(PyGILState_Ensure()) {}
+ ~SWIG_Python_Thread_Block() { end(); }
+ };
+ /* RAII: releases the GIL on construction, restores on end()/destruction. */
+ class SWIG_Python_Thread_Allow {
+ bool status;
+ PyThreadState *save;
+ public:
+ void end() { if (status) { PyEval_RestoreThread(save); status = false; }}
+ SWIG_Python_Thread_Allow() : status(true), save(PyEval_SaveThread()) {}
+ ~SWIG_Python_Thread_Allow() { end(); }
+ };
+# define SWIG_PYTHON_THREAD_BEGIN_BLOCK SWIG_Python_Thread_Block _swig_thread_block
+# define SWIG_PYTHON_THREAD_END_BLOCK _swig_thread_block.end()
+# define SWIG_PYTHON_THREAD_BEGIN_ALLOW SWIG_Python_Thread_Allow _swig_thread_allow
+# define SWIG_PYTHON_THREAD_END_ALLOW _swig_thread_allow.end()
+# else /* C code */
+# define SWIG_PYTHON_THREAD_BEGIN_BLOCK PyGILState_STATE _swig_thread_block = PyGILState_Ensure()
+# define SWIG_PYTHON_THREAD_END_BLOCK PyGILState_Release(_swig_thread_block)
+# define SWIG_PYTHON_THREAD_BEGIN_ALLOW PyThreadState *_swig_thread_allow = PyEval_SaveThread()
+# define SWIG_PYTHON_THREAD_END_ALLOW PyEval_RestoreThread(_swig_thread_allow)
+# endif
+# else /* Old thread way, not implemented, user must provide it */
+# if !defined(SWIG_PYTHON_INITIALIZE_THREADS)
+# define SWIG_PYTHON_INITIALIZE_THREADS
+# endif
+# if !defined(SWIG_PYTHON_THREAD_BEGIN_BLOCK)
+# define SWIG_PYTHON_THREAD_BEGIN_BLOCK
+# endif
+# if !defined(SWIG_PYTHON_THREAD_END_BLOCK)
+# define SWIG_PYTHON_THREAD_END_BLOCK
+# endif
+# if !defined(SWIG_PYTHON_THREAD_BEGIN_ALLOW)
+# define SWIG_PYTHON_THREAD_BEGIN_ALLOW
+# endif
+# if !defined(SWIG_PYTHON_THREAD_END_ALLOW)
+# define SWIG_PYTHON_THREAD_END_ALLOW
+# endif
+# endif
+#else /* No thread support */
+# define SWIG_PYTHON_INITIALIZE_THREADS
+# define SWIG_PYTHON_THREAD_BEGIN_BLOCK
+# define SWIG_PYTHON_THREAD_END_BLOCK
+# define SWIG_PYTHON_THREAD_BEGIN_ALLOW
+# define SWIG_PYTHON_THREAD_END_ALLOW
+#endif
+
+/* -----------------------------------------------------------------------------
+ * Python API portion that goes into the runtime
+ * ----------------------------------------------------------------------------- */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* -----------------------------------------------------------------------------
+ * Constant declarations
+ * ----------------------------------------------------------------------------- */
+
+/* Constant Types */
+#define SWIG_PY_POINTER 4
+#define SWIG_PY_BINARY 5
+
+/* Constant information structure */
+typedef struct swig_const_info {
+ int type;
+ char *name;
+ long lvalue;
+ double dvalue;
+ void *pvalue;
+ swig_type_info **ptype;
+} swig_const_info;
+
+
+/* -----------------------------------------------------------------------------
+ * Wrapper of PyInstanceMethod_New() used in Python 3
+ * It is exported to the generated module, used for -fastproxy
+ * ----------------------------------------------------------------------------- */
+#if PY_VERSION_HEX >= 0x03000000
+SWIGRUNTIME PyObject* SWIG_PyInstanceMethod_New(PyObject *SWIGUNUSEDPARM(self), PyObject *func)
+{
+ return PyInstanceMethod_New(func);
+}
+#else
+SWIGRUNTIME PyObject* SWIG_PyInstanceMethod_New(PyObject *SWIGUNUSEDPARM(self), PyObject *SWIGUNUSEDPARM(func))
+{
+ return NULL;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+
+/* -----------------------------------------------------------------------------
+ * pyrun.swg
+ *
+ * This file contains the runtime support for Python modules
+ * and includes code for managing global variables and pointer
+ * type checking.
+ *
+ * ----------------------------------------------------------------------------- */
+
+/* Common SWIG API */
+
+/* for raw pointers */
+#define SWIG_Python_ConvertPtr(obj, pptr, type, flags) SWIG_Python_ConvertPtrAndOwn(obj, pptr, type, flags, 0)
+#define SWIG_ConvertPtr(obj, pptr, type, flags) SWIG_Python_ConvertPtr(obj, pptr, type, flags)
+#define SWIG_ConvertPtrAndOwn(obj,pptr,type,flags,own) SWIG_Python_ConvertPtrAndOwn(obj, pptr, type, flags, own)
+
+#ifdef SWIGPYTHON_BUILTIN
+#define SWIG_NewPointerObj(ptr, type, flags) SWIG_Python_NewPointerObj(self, ptr, type, flags)
+#else
+#define SWIG_NewPointerObj(ptr, type, flags) SWIG_Python_NewPointerObj(NULL, ptr, type, flags)
+#endif
+
+#define SWIG_InternalNewPointerObj(ptr, type, flags) SWIG_Python_NewPointerObj(NULL, ptr, type, flags)
+
+#define SWIG_CheckImplicit(ty) SWIG_Python_CheckImplicit(ty)
+#define SWIG_AcquirePtr(ptr, src) SWIG_Python_AcquirePtr(ptr, src)
+#define swig_owntype int
+
+/* for raw packed data */
+#define SWIG_ConvertPacked(obj, ptr, sz, ty) SWIG_Python_ConvertPacked(obj, ptr, sz, ty)
+#define SWIG_NewPackedObj(ptr, sz, type) SWIG_Python_NewPackedObj(ptr, sz, type)
+
+/* for class or struct pointers */
+#define SWIG_ConvertInstance(obj, pptr, type, flags) SWIG_ConvertPtr(obj, pptr, type, flags)
+#define SWIG_NewInstanceObj(ptr, type, flags) SWIG_NewPointerObj(ptr, type, flags)
+
+/* for C or C++ function pointers */
+#define SWIG_ConvertFunctionPtr(obj, pptr, type) SWIG_Python_ConvertFunctionPtr(obj, pptr, type)
+#define SWIG_NewFunctionPtrObj(ptr, type) SWIG_Python_NewPointerObj(NULL, ptr, type, 0)
+
+/* for C++ member pointers, ie, member methods */
+#define SWIG_ConvertMember(obj, ptr, sz, ty) SWIG_Python_ConvertPacked(obj, ptr, sz, ty)
+#define SWIG_NewMemberObj(ptr, sz, type) SWIG_Python_NewPackedObj(ptr, sz, type)
+
+
+/* Runtime API */
+
+#define SWIG_GetModule(clientdata) SWIG_Python_GetModule(clientdata)
+#define SWIG_SetModule(clientdata, pointer) SWIG_Python_SetModule(pointer)
+#define SWIG_NewClientData(obj) SwigPyClientData_New(obj)
+
+#define SWIG_SetErrorObj SWIG_Python_SetErrorObj
+#define SWIG_SetErrorMsg SWIG_Python_SetErrorMsg
+#define SWIG_ErrorType(code) SWIG_Python_ErrorType(code)
+#define SWIG_Error(code, msg) SWIG_Python_SetErrorMsg(SWIG_ErrorType(code), msg)
+#define SWIG_fail goto fail
+
+
+/* Runtime API implementation */
+
+/* Error manipulation */
+
+SWIGINTERN void
+SWIG_Python_SetErrorObj(PyObject *errtype, PyObject *obj) {
+ /* Raise 'errtype' with value 'obj' while holding the GIL.
+ Steals the caller's reference to obj. */
+ SWIG_PYTHON_THREAD_BEGIN_BLOCK;
+ PyErr_SetObject(errtype, obj);
+ Py_DECREF(obj);
+ SWIG_PYTHON_THREAD_END_BLOCK;
+}
+
+SWIGINTERN void
+SWIG_Python_SetErrorMsg(PyObject *errtype, const char *msg) {
+ /* Raise 'errtype' with a C-string message while holding the GIL. */
+ SWIG_PYTHON_THREAD_BEGIN_BLOCK;
+ PyErr_SetString(errtype, msg);
+ SWIG_PYTHON_THREAD_END_BLOCK;
+}
+
+/* Raise the Python exception class registered for 'desc' (steals obj). */
+#define SWIG_Python_Raise(obj, type, desc) SWIG_Python_SetErrorObj(SWIG_Python_ExceptionType(desc), obj)
+
+/* Set a constant value */
+
+#if defined(SWIGPYTHON_BUILTIN)
+
+/* Record 'key' in the module's public-interface list (for __all__). */
+SWIGINTERN void
+SwigPyBuiltin_AddPublicSymbol(PyObject *seq, const char *key) {
+ PyObject *s = PyString_InternFromString(key);
+ PyList_Append(seq, s);
+ Py_DECREF(s);
+}
+
+/* Install 'obj' as module constant 'name'; steals the reference to obj. */
+SWIGINTERN void
+SWIG_Python_SetConstant(PyObject *d, PyObject *public_interface, const char *name, PyObject *obj) {
+#if PY_VERSION_HEX < 0x02030000
+ PyDict_SetItemString(d, (char *)name, obj);
+#else
+ PyDict_SetItemString(d, name, obj);
+#endif
+ Py_DECREF(obj);
+ if (public_interface)
+ SwigPyBuiltin_AddPublicSymbol(public_interface, name);
+}
+
+#else
+
+/* Install 'obj' as module constant 'name'; steals the reference to obj. */
+SWIGINTERN void
+SWIG_Python_SetConstant(PyObject *d, const char *name, PyObject *obj) {
+#if PY_VERSION_HEX < 0x02030000
+ PyDict_SetItemString(d, (char *)name, obj);
+#else
+ PyDict_SetItemString(d, name, obj);
+#endif
+ Py_DECREF(obj);
+}
+
+#endif
+
+/* Append a value to the result obj */
+
+SWIGINTERN PyObject*
+SWIG_Python_AppendOutput(PyObject* result, PyObject* obj) {
+ /* Accumulate multiple output values: first value is returned as-is; later
+ values promote the result to a list (default) or tuple
+ (SWIG_PYTHON_OUTPUT_TUPLE). Steals the reference to obj. */
+#if !defined(SWIG_PYTHON_OUTPUT_TUPLE)
+ if (!result) {
+ result = obj;
+ } else if (result == Py_None) {
+ Py_DECREF(result);
+ result = obj;
+ } else {
+ if (!PyList_Check(result)) {
+ PyObject *o2 = result;
+ result = PyList_New(1);
+ PyList_SetItem(result, 0, o2);
+ }
+ PyList_Append(result,obj);
+ Py_DECREF(obj);
+ }
+ return result;
+#else
+ PyObject* o2;
+ PyObject* o3;
+ if (!result) {
+ result = obj;
+ } else if (result == Py_None) {
+ Py_DECREF(result);
+ result = obj;
+ } else {
+ if (!PyTuple_Check(result)) {
+ o2 = result;
+ result = PyTuple_New(1);
+ PyTuple_SET_ITEM(result, 0, o2);
+ }
+ o3 = PyTuple_New(1);
+ PyTuple_SET_ITEM(o3, 0, obj);
+ o2 = result;
+ result = PySequence_Concat(o2, o3);
+ Py_DECREF(o2);
+ Py_DECREF(o3);
+ }
+ return result;
+#endif
+}
+
+/* Unpack the argument tuple */
+
+SWIGINTERN int
+SWIG_Python_UnpackTuple(PyObject *args, const char *name, Py_ssize_t min, Py_ssize_t max, PyObject **objs)
+{
+ /* Unpack 'args' into objs[0..max-1] (borrowed refs; unused slots set to 0).
+ Returns number-of-args+1 on success, 0 on arity/type error (with a
+ Python exception set). A non-tuple 'args' is accepted as a single
+ argument when the arity allows it (METH_O-style call). */
+ if (!args) {
+ if (!min && !max) {
+ return 1;
+ } else {
+ PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got none",
+ name, (min == max ? "" : "at least "), (int)min);
+ return 0;
+ }
+ }
+ if (!PyTuple_Check(args)) {
+ if (min <= 1 && max >= 1) {
+ register int i;
+ objs[0] = args;
+ for (i = 1; i < max; ++i) {
+ objs[i] = 0;
+ }
+ return 2;
+ }
+ PyErr_SetString(PyExc_SystemError, "UnpackTuple() argument list is not a tuple");
+ return 0;
+ } else {
+ register Py_ssize_t l = PyTuple_GET_SIZE(args);
+ if (l < min) {
+ PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got %d",
+ name, (min == max ? "" : "at least "), (int)min, (int)l);
+ return 0;
+ } else if (l > max) {
+ PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got %d",
+ name, (min == max ? "" : "at most "), (int)max, (int)l);
+ return 0;
+ } else {
+ register int i;
+ for (i = 0; i < l; ++i) {
+ objs[i] = PyTuple_GET_ITEM(args, i);
+ }
+ for (; l < max; ++l) {
+ objs[l] = 0;
+ }
+ return i + 1;
+ }
+ }
+}
+
+/* A functor is a function object with one single object argument */
+#if PY_VERSION_HEX >= 0x02020000
+#define SWIG_Python_CallFunctor(functor, obj) PyObject_CallFunctionObjArgs(functor, obj, NULL);
+#else
+#define SWIG_Python_CallFunctor(functor, obj) PyObject_CallFunction(functor, "O", obj);
+#endif
+
+/*
+ Helper for static pointer initialization for both C and C++ code, for example
+ static PyObject *SWIG_STATIC_POINTER(MyVar) = NewSomething(...);
+*/
+/* In C the initializer cannot be a function call, so emulate C++ static
+ initialization with a lazy first-use assignment. */
+#ifdef __cplusplus
+#define SWIG_STATIC_POINTER(var) var
+#else
+#define SWIG_STATIC_POINTER(var) var = 0; if (!var) var
+#endif
+
+/* -----------------------------------------------------------------------------
+ * Pointer declarations
+ * ----------------------------------------------------------------------------- */
+
+/* Flags for new pointer objects */
+#define SWIG_POINTER_NOSHADOW (SWIG_POINTER_OWN << 1)
+#define SWIG_POINTER_NEW (SWIG_POINTER_NOSHADOW | SWIG_POINTER_OWN)
+
+#define SWIG_POINTER_IMPLICIT_CONV (SWIG_POINTER_DISOWN << 1)
+
+#define SWIG_BUILTIN_TP_INIT (SWIG_POINTER_OWN << 2)
+#define SWIG_BUILTIN_INIT (SWIG_BUILTIN_TP_INIT | SWIG_POINTER_OWN)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* How to access Py_None */
+#if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
+# ifndef SWIG_PYTHON_NO_BUILD_NONE
+# ifndef SWIG_PYTHON_BUILD_NONE
+# define SWIG_PYTHON_BUILD_NONE
+# endif
+# endif
+#endif
+
+/* On Windows DLL builds, Py_None cannot be referenced directly, so route it
+ through a runtime lookup. */
+#ifdef SWIG_PYTHON_BUILD_NONE
+# ifdef Py_None
+# undef Py_None
+# define Py_None SWIG_Py_None()
+# endif
+SWIGRUNTIMEINLINE PyObject *
+_SWIG_Py_None(void)
+{
+ /* Returns a borrowed reference: None is immortal in practice here, the
+ DECREF only drops the reference BuildValue just created. */
+ PyObject *none = Py_BuildValue((char*)"");
+ Py_DECREF(none);
+ return none;
+}
+SWIGRUNTIME PyObject *
+SWIG_Py_None(void)
+{
+ /* Cache the None pointer after the first lookup. */
+ static PyObject *SWIG_STATIC_POINTER(none) = _SWIG_Py_None();
+ return none;
+}
+#endif
+
+/* The python void return value */
+
+SWIGRUNTIMEINLINE PyObject *
+SWIG_Py_Void(void)
+{
+ /* Return None as an owned reference, the convention for C functions
+ that hand a PyObject* back to Python. */
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+/* SwigPyClientData */
+
+/* Per-wrapped-class bookkeeping attached to a swig_type_info. */
+typedef struct {
+ PyObject *klass; /* the Python proxy class */
+ PyObject *newraw; /* klass.__new__, when available */
+ PyObject *newargs; /* argument tuple (or klass) used with newraw */
+ PyObject *destroy; /* klass.__swig_destroy__, the C++ delete hook */
+ int delargs; /* nonzero if destroy must be called via a temp object */
+ int implicitconv; /* nonzero while implicit conversion is permitted */
+ PyTypeObject *pytype; /* builtin type object (SWIGPYTHON_BUILTIN) */
+} SwigPyClientData;
+
+SWIGRUNTIMEINLINE int
+SWIG_Python_CheckImplicit(swig_type_info *ty)
+{
+ /* Implicit conversion is allowed only when client data is registered
+ and its implicitconv flag is set. */
+ SwigPyClientData *cd = (SwigPyClientData *)ty->clientdata;
+ if (!cd) return 0;
+ return cd->implicitconv;
+}
+
+SWIGRUNTIMEINLINE PyObject *
+SWIG_Python_ExceptionType(swig_type_info *desc) {
+ /* Use the registered proxy class as the exception type when available,
+ otherwise fall back to RuntimeError. */
+ if (desc && desc->clientdata) {
+ SwigPyClientData *cd = (SwigPyClientData *) desc->clientdata;
+ if (cd->klass) return cd->klass;
+ }
+ return PyExc_RuntimeError;
+}
+
+
+SWIGRUNTIME SwigPyClientData *
+SwigPyClientData_New(PyObject* obj)
+{
+ /* Build the per-class client data for proxy class 'obj': capture the
+ class, its __new__ (for raw instance creation) and its
+ __swig_destroy__ (the C++ destructor hook). Returns NULL if obj is
+ NULL. The struct is malloc'd and owns references to its members. */
+ if (!obj) {
+ return 0;
+ } else {
+ SwigPyClientData *data = (SwigPyClientData *)malloc(sizeof(SwigPyClientData));
+ /* the klass element */
+ data->klass = obj;
+ Py_INCREF(data->klass);
+ /* the newraw method and newargs arguments used to create a new raw instance */
+ if (PyClass_Check(obj)) {
+ /* old-style class: no __new__, instantiate by calling the class */
+ data->newraw = 0;
+ data->newargs = obj;
+ Py_INCREF(obj);
+ } else {
+#if (PY_VERSION_HEX < 0x02020000)
+ data->newraw = 0;
+#else
+ data->newraw = PyObject_GetAttrString(data->klass, (char *)"__new__");
+#endif
+ if (data->newraw) {
+ Py_INCREF(data->newraw);
+ data->newargs = PyTuple_New(1);
+ PyTuple_SetItem(data->newargs, 0, obj);
+ } else {
+ data->newargs = obj;
+ }
+ Py_INCREF(data->newargs);
+ }
+ /* the destroy method, aka as the C++ delete method */
+ data->destroy = PyObject_GetAttrString(data->klass, (char *)"__swig_destroy__");
+ if (PyErr_Occurred()) {
+ PyErr_Clear();
+ data->destroy = 0;
+ }
+ if (data->destroy) {
+ int flags;
+ Py_INCREF(data->destroy);
+ flags = PyCFunction_GET_FLAGS(data->destroy);
+#ifdef METH_O
+ /* METH_O destructors can be called directly; others need a temp obj */
+ data->delargs = !(flags & (METH_O));
+#else
+ data->delargs = 0;
+#endif
+ } else {
+ data->delargs = 0;
+ }
+ data->implicitconv = 0;
+ data->pytype = 0;
+ return data;
+ }
+}
+
+SWIGRUNTIME void
+SwigPyClientData_Del(SwigPyClientData *data) {
+ /* Release the owned member references.
+ NOTE(review): data->klass is not released and the struct itself is not
+ freed here — this matches upstream SWIG; confirm before "fixing". */
+ Py_XDECREF(data->newraw);
+ Py_XDECREF(data->newargs);
+ Py_XDECREF(data->destroy);
+}
+
+/* =============== SwigPyObject =====================*/
+
+/* The low-level 'this' object: wraps one raw C/C++ pointer. */
+typedef struct {
+ PyObject_HEAD
+ void *ptr; /* the wrapped C/C++ pointer */
+ swig_type_info *ty; /* its SWIG type descriptor */
+ int own; /* SWIG_POINTER_OWN when Python owns/deletes ptr */
+ PyObject *next; /* chain of additional SwigPyObjects (multiple inheritance) */
+#ifdef SWIGPYTHON_BUILTIN
+ PyObject *dict; /* instance dict for builtin wrappers */
+#endif
+} SwigPyObject;
+
+SWIGRUNTIME PyObject *
+SwigPyObject_long(SwigPyObject *v)
+{
+ /* Expose the raw pointer value as a Python integer (nb_int slot). */
+ return PyLong_FromVoidPtr(v->ptr);
+}
+
+SWIGRUNTIME PyObject *
+SwigPyObject_format(const char* fmt, SwigPyObject *v)
+{
+ /* Render the pointer value through printf-style 'fmt' (e.g. "%o"/"%x").
+ Returns a new string object, or NULL on failure. */
+ PyObject *res = NULL;
+ PyObject *args = PyTuple_New(1);
+ if (args) {
+ /* PyTuple_SetItem steals the reference returned by SwigPyObject_long */
+ if (PyTuple_SetItem(args, 0, SwigPyObject_long(v)) == 0) {
+ PyObject *ofmt = SWIG_Python_str_FromChar(fmt);
+ if (ofmt) {
+#if PY_VERSION_HEX >= 0x03000000
+ res = PyUnicode_Format(ofmt,args);
+#else
+ res = PyString_Format(ofmt,args);
+#endif
+ Py_DECREF(ofmt);
+ }
+ Py_DECREF(args);
+ }
+ }
+ return res;
+}
+
+SWIGRUNTIME PyObject *
+SwigPyObject_oct(SwigPyObject *v)
+{
+ /* Octal rendering of the pointer (nb_oct slot, Python 2 only). */
+ return SwigPyObject_format("%o",v);
+}
+
+SWIGRUNTIME PyObject *
+SwigPyObject_hex(SwigPyObject *v)
+{
+ /* Hexadecimal rendering of the pointer (nb_hex slot, Python 2 only). */
+ return SwigPyObject_format("%x",v);
+}
+
+SWIGRUNTIME PyObject *
+#ifdef METH_NOARGS
+SwigPyObject_repr(SwigPyObject *v)
+#else
+SwigPyObject_repr(SwigPyObject *v, PyObject *args)
+#endif
+{
+ /* repr(): "<Swig Object of type 'T' at 0x...>", concatenating the reprs
+ of any chained 'next' objects. Returns a new string reference. */
+ const char *name = SWIG_TypePrettyName(v->ty);
+ /* BUGFIX: the format string had been reduced to "" (an HTML-tag filter
+ stripped the <...> template), discarding both arguments; restored to
+ the upstream SWIG template. */
+ PyObject *repr = SWIG_Python_str_FromFormat("<Swig Object of type '%s' at %p>", (name ? name : "unknown"), (void *)v);
+ if (v->next) {
+# ifdef METH_NOARGS
+ PyObject *nrep = SwigPyObject_repr((SwigPyObject *)v->next);
+# else
+ PyObject *nrep = SwigPyObject_repr((SwigPyObject *)v->next, args);
+# endif
+# if PY_VERSION_HEX >= 0x03000000
+ PyObject *joined = PyUnicode_Concat(repr, nrep);
+ Py_DecRef(repr);
+ Py_DecRef(nrep);
+ repr = joined;
+# else
+ /* Python 2: in-place concat, consuming nrep */
+ PyString_ConcatAndDel(&repr,nrep);
+# endif
+ }
+ return repr;
+}
+
+SWIGRUNTIME int
+SwigPyObject_print(SwigPyObject *v, FILE *fp, int SWIGUNUSEDPARM(flags))
+{
+ /* tp_print slot: write repr(v) to fp. Returns 0 on success, 1 when the
+ repr could not be built. */
+ char *str;
+#ifdef METH_NOARGS
+ PyObject *repr = SwigPyObject_repr(v);
+#else
+ PyObject *repr = SwigPyObject_repr(v, NULL);
+#endif
+ if (repr) {
+ str = SWIG_Python_str_AsChar(repr);
+ fputs(str, fp);
+ SWIG_Python_str_DelForPy3(str);
+ Py_DECREF(repr);
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+SWIGRUNTIME PyObject *
+SwigPyObject_str(SwigPyObject *v)
+{
+ /* str(): pack the pointer + type name into a printable buffer. */
+ char result[SWIG_BUFFER_SIZE];
+ return SWIG_PackVoidPtr(result, v->ptr, v->ty->name, sizeof(result)) ?
+ SWIG_Python_str_FromChar(result) : 0;
+}
+
+SWIGRUNTIME int
+SwigPyObject_compare(SwigPyObject *v, SwigPyObject *w)
+{
+ /* Classic cmp semantics: order wrappers by raw pointer address. */
+ void *lhs = v->ptr;
+ void *rhs = w->ptr;
+ if (lhs < rhs) return -1;
+ if (lhs > rhs) return 1;
+ return 0;
+}
+
+/* Added for Python 3.x, would it also be useful for Python 2.x? */
+SWIGRUNTIME PyObject*
+SwigPyObject_richcompare(SwigPyObject *v, SwigPyObject *w, int op)
+{
+ /* Only equality tests are meaningful on pointer wrappers; defer the
+ ordering operators to other types. */
+ if (op != Py_EQ && op != Py_NE) {
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ {
+ int equal = (SwigPyObject_compare(v, w) == 0);
+ return PyBool_FromLong((op == Py_EQ) ? equal : !equal);
+ }
+}
+
+
+SWIGRUNTIME PyTypeObject* SwigPyObject_TypeOnce(void);
+
+#ifdef SWIGPYTHON_BUILTIN
+/* Builtin mode: the type object lives in the registered client data. */
+static swig_type_info *SwigPyObject_stype = 0;
+SWIGRUNTIME PyTypeObject*
+SwigPyObject_type(void) {
+ SwigPyClientData *cd;
+ assert(SwigPyObject_stype);
+ cd = (SwigPyClientData*) SwigPyObject_stype->clientdata;
+ assert(cd);
+ assert(cd->pytype);
+ return cd->pytype;
+}
+#else
+/* Classic mode: lazily create the type once and cache it. */
+SWIGRUNTIME PyTypeObject*
+SwigPyObject_type(void) {
+ static PyTypeObject *SWIG_STATIC_POINTER(type) = SwigPyObject_TypeOnce();
+ return type;
+}
+#endif
+
+SWIGRUNTIMEINLINE int
+SwigPyObject_Check(PyObject *op) {
+ /* True if op is a SwigPyObject; the tp_name comparison also accepts
+ instances created by another SWIG module's copy of the type. */
+#ifdef SWIGPYTHON_BUILTIN
+ PyTypeObject *target_tp = SwigPyObject_type();
+ if (PyType_IsSubtype(op->ob_type, target_tp))
+ return 1;
+ return (strcmp(op->ob_type->tp_name, "SwigPyObject") == 0);
+#else
+ return (Py_TYPE(op) == SwigPyObject_type())
+ || (strcmp(Py_TYPE(op)->tp_name,"SwigPyObject") == 0);
+#endif
+}
+
+SWIGRUNTIME PyObject *
+SwigPyObject_New(void *ptr, swig_type_info *ty, int own);
+
+SWIGRUNTIME void
+SwigPyObject_dealloc(PyObject *v)
+{
+ /* tp_dealloc: when Python owns the C pointer, invoke the registered
+ __swig_destroy__ hook (the C++ delete); otherwise just free the
+ wrapper. Also releases the 'next' chain link. */
+ SwigPyObject *sobj = (SwigPyObject *) v;
+ PyObject *next = sobj->next;
+ if (sobj->own == SWIG_POINTER_OWN) {
+ swig_type_info *ty = sobj->ty;
+ SwigPyClientData *data = ty ? (SwigPyClientData *) ty->clientdata : 0;
+ PyObject *destroy = data ? data->destroy : 0;
+ if (destroy) {
+ /* destroy is always a VARARGS method */
+ PyObject *res;
+ if (data->delargs) {
+ /* we need to create a temporary object to carry the destroy operation */
+ PyObject *tmp = SwigPyObject_New(sobj->ptr, ty, 0);
+ res = SWIG_Python_CallFunctor(destroy, tmp);
+ Py_DECREF(tmp);
+ } else {
+ /* METH_O destroy: call the underlying C function directly on v */
+ PyCFunction meth = PyCFunction_GET_FUNCTION(destroy);
+ PyObject *mself = PyCFunction_GET_SELF(destroy);
+ res = ((*meth)(mself, v));
+ }
+ Py_XDECREF(res);
+ }
+#if !defined(SWIG_PYTHON_SILENT_MEMLEAK)
+ else {
+ const char *name = SWIG_TypePrettyName(ty);
+ printf("swig/python detected a memory leak of type '%s', no destructor found.\n", (name ? name : "unknown"));
+ }
+#endif
+ }
+ Py_XDECREF(next);
+ PyObject_DEL(v);
+}
+
+SWIGRUNTIME PyObject*
+SwigPyObject_append(PyObject* v, PyObject* next)
+{
+ /* Link another SwigPyObject onto v's 'next' chain (used for multiple
+ inheritance). Returns None, or NULL if 'next' is not a SwigPyObject.
+ NOTE(review): any previous sobj->next is overwritten without a DECREF —
+ matches upstream SWIG; confirm before changing. */
+ SwigPyObject *sobj = (SwigPyObject *) v;
+#ifndef METH_O
+ PyObject *tmp = 0;
+ if (!PyArg_ParseTuple(next,(char *)"O:append", &tmp)) return NULL;
+ next = tmp;
+#endif
+ if (!SwigPyObject_Check(next)) {
+ return NULL;
+ }
+ sobj->next = next;
+ Py_INCREF(next);
+ return SWIG_Py_Void();
+}
+
+SWIGRUNTIME PyObject*
+#ifdef METH_NOARGS
+SwigPyObject_next(PyObject* v)
+#else
+SwigPyObject_next(PyObject* v, PyObject *SWIGUNUSEDPARM(args))
+#endif
+{
+ /* Return the next object in the 'this' chain (new ref), or None. */
+ SwigPyObject *sobj = (SwigPyObject *) v;
+ if (sobj->next) {
+ Py_INCREF(sobj->next);
+ return sobj->next;
+ } else {
+ return SWIG_Py_Void();
+ }
+}
+
+SWIGINTERN PyObject*
+#ifdef METH_NOARGS
+SwigPyObject_disown(PyObject *v)
+#else
+SwigPyObject_disown(PyObject* v, PyObject *SWIGUNUSEDPARM(args))
+#endif
+{
+ /* Release Python's ownership of the C pointer (no destructor on dealloc). */
+ SwigPyObject *sobj = (SwigPyObject *)v;
+ sobj->own = 0;
+ return SWIG_Py_Void();
+}
+
+SWIGINTERN PyObject*
+#ifdef METH_NOARGS
+SwigPyObject_acquire(PyObject *v)
+#else
+SwigPyObject_acquire(PyObject* v, PyObject *SWIGUNUSEDPARM(args))
+#endif
+{
+ /* Give Python ownership of the C pointer (destructor runs on dealloc). */
+ SwigPyObject *sobj = (SwigPyObject *)v;
+ sobj->own = SWIG_POINTER_OWN;
+ return SWIG_Py_Void();
+}
+
+SWIGINTERN PyObject*
+SwigPyObject_own(PyObject *v, PyObject *args)
+{
+ /* own([flag]) -> previous ownership as a bool; with an argument, set
+ ownership to bool(flag) via acquire/disown. */
+ PyObject *val = 0;
+#if (PY_VERSION_HEX < 0x02020000)
+ if (!PyArg_ParseTuple(args,(char *)"|O:own",&val))
+#elif (PY_VERSION_HEX < 0x02050000)
+ if (!PyArg_UnpackTuple(args, (char *)"own", 0, 1, &val))
+#else
+ if (!PyArg_UnpackTuple(args, "own", 0, 1, &val))
+#endif
+ {
+ return NULL;
+ }
+ else
+ {
+ SwigPyObject *sobj = (SwigPyObject *)v;
+ /* capture the old flag before mutating it */
+ PyObject *obj = PyBool_FromLong(sobj->own);
+ if (val) {
+#ifdef METH_NOARGS
+ if (PyObject_IsTrue(val)) {
+ SwigPyObject_acquire(v);
+ } else {
+ SwigPyObject_disown(v);
+ }
+#else
+ if (PyObject_IsTrue(val)) {
+ SwigPyObject_acquire(v,args);
+ } else {
+ SwigPyObject_disown(v,args);
+ }
+#endif
+ }
+ return obj;
+ }
+}
+
+/* Method table for SwigPyObject; the two variants differ only in the
+ calling conventions available on old Python versions. */
+#ifdef METH_O
+static PyMethodDef
+swigobject_methods[] = {
+ {(char *)"disown", (PyCFunction)SwigPyObject_disown, METH_NOARGS, (char *)"releases ownership of the pointer"},
+ {(char *)"acquire", (PyCFunction)SwigPyObject_acquire, METH_NOARGS, (char *)"aquires ownership of the pointer"},
+ {(char *)"own", (PyCFunction)SwigPyObject_own, METH_VARARGS, (char *)"returns/sets ownership of the pointer"},
+ {(char *)"append", (PyCFunction)SwigPyObject_append, METH_O, (char *)"appends another 'this' object"},
+ {(char *)"next", (PyCFunction)SwigPyObject_next, METH_NOARGS, (char *)"returns the next 'this' object"},
+ {(char *)"__repr__",(PyCFunction)SwigPyObject_repr, METH_NOARGS, (char *)"returns object representation"},
+ {0, 0, 0, 0}
+};
+#else
+static PyMethodDef
+swigobject_methods[] = {
+ {(char *)"disown", (PyCFunction)SwigPyObject_disown, METH_VARARGS, (char *)"releases ownership of the pointer"},
+ {(char *)"acquire", (PyCFunction)SwigPyObject_acquire, METH_VARARGS, (char *)"aquires ownership of the pointer"},
+ {(char *)"own", (PyCFunction)SwigPyObject_own, METH_VARARGS, (char *)"returns/sets ownership of the pointer"},
+ {(char *)"append", (PyCFunction)SwigPyObject_append, METH_VARARGS, (char *)"appends another 'this' object"},
+ {(char *)"next", (PyCFunction)SwigPyObject_next, METH_VARARGS, (char *)"returns the next 'this' object"},
+ {(char *)"__repr__",(PyCFunction)SwigPyObject_repr, METH_VARARGS, (char *)"returns object representation"},
+ {0, 0, 0, 0}
+};
+#endif
+
+#if PY_VERSION_HEX < 0x02020000
+/* Pre-2.2 tp_getattr fallback: resolve names via the method table. */
+SWIGINTERN PyObject *
+SwigPyObject_getattr(SwigPyObject *sobj,char *name)
+{
+ return Py_FindMethod(swigobject_methods, (PyObject *)sobj, name);
+}
+#endif
+
+SWIGRUNTIME PyTypeObject*
+SwigPyObject_TypeOnce(void) {
+ /* One-time construction of the SwigPyObject type object. The slot
+ layout is positional and version-conditional, so the initializer is
+ assembled into 'tmp' and copied into the function-static type. */
+ static char swigobject_doc[] = "Swig object carries a C/C++ instance pointer";
+
+ static PyNumberMethods SwigPyObject_as_number = {
+ (binaryfunc)0, /*nb_add*/
+ (binaryfunc)0, /*nb_subtract*/
+ (binaryfunc)0, /*nb_multiply*/
+ /* nb_divide removed in Python 3 */
+#if PY_VERSION_HEX < 0x03000000
+ (binaryfunc)0, /*nb_divide*/
+#endif
+ (binaryfunc)0, /*nb_remainder*/
+ (binaryfunc)0, /*nb_divmod*/
+ (ternaryfunc)0,/*nb_power*/
+ (unaryfunc)0, /*nb_negative*/
+ (unaryfunc)0, /*nb_positive*/
+ (unaryfunc)0, /*nb_absolute*/
+ (inquiry)0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+#if PY_VERSION_HEX < 0x03000000
+ 0, /*nb_coerce*/
+#endif
+ (unaryfunc)SwigPyObject_long, /*nb_int*/
+#if PY_VERSION_HEX < 0x03000000
+ (unaryfunc)SwigPyObject_long, /*nb_long*/
+#else
+ 0, /*nb_reserved*/
+#endif
+ (unaryfunc)0, /*nb_float*/
+#if PY_VERSION_HEX < 0x03000000
+ (unaryfunc)SwigPyObject_oct, /*nb_oct*/
+ (unaryfunc)SwigPyObject_hex, /*nb_hex*/
+#endif
+#if PY_VERSION_HEX >= 0x03000000 /* 3.0 */
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index, nb_inplace_divide removed */
+#elif PY_VERSION_HEX >= 0x02050000 /* 2.5.0 */
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index */
+#elif PY_VERSION_HEX >= 0x02020000 /* 2.2.0 */
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_true_divide */
+#elif PY_VERSION_HEX >= 0x02000000 /* 2.0.0 */
+ 0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_or */
+#endif
+ };
+
+ static PyTypeObject swigpyobject_type;
+ static int type_init = 0;
+ if (!type_init) {
+ const PyTypeObject tmp = {
+ /* PyObject header changed in Python 3 */
+#if PY_VERSION_HEX >= 0x03000000
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
+ PyObject_HEAD_INIT(NULL)
+ 0, /* ob_size */
+#endif
+ (char *)"SwigPyObject", /* tp_name */
+ sizeof(SwigPyObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor)SwigPyObject_dealloc, /* tp_dealloc */
+ (printfunc)SwigPyObject_print, /* tp_print */
+#if PY_VERSION_HEX < 0x02020000
+ (getattrfunc)SwigPyObject_getattr, /* tp_getattr */
+#else
+ (getattrfunc)0, /* tp_getattr */
+#endif
+ (setattrfunc)0, /* tp_setattr */
+#if PY_VERSION_HEX >= 0x03000000
+ 0, /* tp_reserved in 3.0.1, tp_compare in 3.0.0 but not used */
+#else
+ (cmpfunc)SwigPyObject_compare, /* tp_compare */
+#endif
+ (reprfunc)SwigPyObject_repr, /* tp_repr */
+ &SwigPyObject_as_number, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ (hashfunc)0, /* tp_hash */
+ (ternaryfunc)0, /* tp_call */
+ (reprfunc)SwigPyObject_str, /* tp_str */
+ PyObject_GenericGetAttr, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ swigobject_doc, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ (richcmpfunc)SwigPyObject_richcompare,/* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+#if PY_VERSION_HEX >= 0x02020000
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ swigobject_methods, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+#endif
+#if PY_VERSION_HEX >= 0x02030000
+ 0, /* tp_del */
+#endif
+#if PY_VERSION_HEX >= 0x02060000
+ 0, /* tp_version */
+#endif
+#ifdef COUNT_ALLOCS
+ 0,0,0,0 /* tp_alloc -> tp_next */
+#endif
+ };
+ swigpyobject_type = tmp;
+ type_init = 1;
+#if PY_VERSION_HEX < 0x02020000
+ swigpyobject_type.ob_type = &PyType_Type;
+#else
+ if (PyType_Ready(&swigpyobject_type) < 0)
+ return NULL;
+#endif
+ }
+ return &swigpyobject_type;
+}
+
+SWIGRUNTIME PyObject *
+SwigPyObject_New(void *ptr, swig_type_info *ty, int own)
+{
+ /* Allocate a fresh wrapper carrying ptr/ty/own; the 'next' chain starts
+ empty. Returns NULL when allocation fails. */
+ SwigPyObject *sobj = PyObject_NEW(SwigPyObject, SwigPyObject_type());
+ if (!sobj)
+ return (PyObject *)sobj;
+ sobj->ptr = ptr;
+ sobj->ty = ty;
+ sobj->own = own;
+ sobj->next = 0;
+ return (PyObject *)sobj;
+}
+
+/* -----------------------------------------------------------------------------
+ * Implements a simple Swig Packed type, and use it instead of string
+ * ----------------------------------------------------------------------------- */
+
+/* A packed-data object: an owned byte copy of a small C value (e.g. a
+ member pointer) plus its SWIG type. */
+typedef struct {
+ PyObject_HEAD
+ void *pack; /* malloc'd copy of the packed bytes */
+ swig_type_info *ty; /* SWIG type descriptor of the packed value */
+ size_t size; /* number of bytes in pack */
+} SwigPyPacked;
+
+SWIGRUNTIME int
+SwigPyPacked_print(SwigPyPacked *v, FILE *fp, int SWIGUNUSEDPARM(flags))
+{
+ /* tp_print slot: writes "<Swig Packed at HEX TYPE>" to fp; always 0. */
+ char result[SWIG_BUFFER_SIZE];
+ /* BUGFIX: an HTML-tag filter had fused two lines here, deleting the
+ "<Swig Packed " opener and the SWIG_PackDataName call; restored from
+ the upstream SWIG template. */
+ fputs("<Swig Packed ", fp);
+ if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))) {
+ fputs("at ", fp);
+ fputs(result, fp);
+ }
+ fputs(v->ty->name,fp);
+ fputs(">", fp);
+ return 0;
+}
+
+SWIGRUNTIME PyObject *
+SwigPyPacked_repr(SwigPyPacked *v)
+{
+ /* repr(): "<Swig Packed at HEXTYPE>" (or without the hex dump when the
+ data cannot be rendered). Returns a new string reference. */
+ char result[SWIG_BUFFER_SIZE];
+ /* BUGFIX: both format strings had been reduced to "" (an HTML-tag
+ filter stripped the <...> templates), discarding the arguments;
+ restored to the upstream SWIG templates. */
+ if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))) {
+ return SWIG_Python_str_FromFormat("<Swig Packed at %s%s>", result, v->ty->name);
+ } else {
+ return SWIG_Python_str_FromFormat("<Swig Packed %s>", v->ty->name);
+ }
+}
+
+SWIGRUNTIME PyObject *
+SwigPyPacked_str(SwigPyPacked *v)
+{
+ /* str(): hex dump of the packed bytes followed by the type name, or
+ just the type name when the data cannot be rendered. */
+ char result[SWIG_BUFFER_SIZE];
+ if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))){
+ return SWIG_Python_str_FromFormat("%s%s", result, v->ty->name);
+ } else {
+ return SWIG_Python_str_FromChar(v->ty->name);
+ }
+}
+
+SWIGRUNTIME int
+SwigPyPacked_compare(SwigPyPacked *v, SwigPyPacked *w)
+{
+ /* Classic cmp: shorter packed blobs sort first; equal sizes fall back
+ to a byte comparison (the 2*size bound matches upstream SWIG). */
+ size_t lsz = v->size;
+ size_t rsz = w->size;
+ if (lsz < rsz) return -1;
+ if (lsz > rsz) return 1;
+ return strncmp((char *)v->pack, (char *)w->pack, 2*v->size);
+}
+
+SWIGRUNTIME PyTypeObject* SwigPyPacked_TypeOnce(void);
+
+SWIGRUNTIME PyTypeObject*
+SwigPyPacked_type(void) {
+ /* Lazily create and cache the SwigPyPacked type object. */
+ static PyTypeObject *SWIG_STATIC_POINTER(type) = SwigPyPacked_TypeOnce();
+ return type;
+}
+
+SWIGRUNTIMEINLINE int
+SwigPyPacked_Check(PyObject *op) {
+ /* True if op is a SwigPyPacked; the tp_name comparison also accepts
+ instances from another SWIG module's copy of the type. */
+ return ((op)->ob_type == SwigPyPacked_TypeOnce())
+ || (strcmp((op)->ob_type->tp_name,"SwigPyPacked") == 0);
+}
+
+SWIGRUNTIME void
+SwigPyPacked_dealloc(PyObject *v)
+{
+ /* Free the owned byte buffer (only when v really is a SwigPyPacked),
+ then release the Python object itself. */
+ if (SwigPyPacked_Check(v)) {
+ free(((SwigPyPacked *) v)->pack);
+ }
+ PyObject_DEL(v);
+}
+
+SWIGRUNTIME PyTypeObject*
+SwigPyPacked_TypeOnce(void) {
+ /* One-time construction of the SwigPyPacked type object; same
+ positional, version-conditional slot layout as SwigPyObject_TypeOnce. */
+ static char swigpacked_doc[] = "Swig object carries a C/C++ instance pointer";
+ static PyTypeObject swigpypacked_type;
+ static int type_init = 0;
+ if (!type_init) {
+ const PyTypeObject tmp = {
+ /* PyObject header changed in Python 3 */
+#if PY_VERSION_HEX>=0x03000000
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
+ PyObject_HEAD_INIT(NULL)
+ 0, /* ob_size */
+#endif
+ (char *)"SwigPyPacked", /* tp_name */
+ sizeof(SwigPyPacked), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor)SwigPyPacked_dealloc, /* tp_dealloc */
+ (printfunc)SwigPyPacked_print, /* tp_print */
+ (getattrfunc)0, /* tp_getattr */
+ (setattrfunc)0, /* tp_setattr */
+#if PY_VERSION_HEX>=0x03000000
+ 0, /* tp_reserved in 3.0.1 */
+#else
+ (cmpfunc)SwigPyPacked_compare, /* tp_compare */
+#endif
+ (reprfunc)SwigPyPacked_repr, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ (hashfunc)0, /* tp_hash */
+ (ternaryfunc)0, /* tp_call */
+ (reprfunc)SwigPyPacked_str, /* tp_str */
+ PyObject_GenericGetAttr, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ swigpacked_doc, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+#if PY_VERSION_HEX >= 0x02020000
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+#endif
+#if PY_VERSION_HEX >= 0x02030000
+ 0, /* tp_del */
+#endif
+#if PY_VERSION_HEX >= 0x02060000
+ 0, /* tp_version */
+#endif
+#ifdef COUNT_ALLOCS
+ 0,0,0,0 /* tp_alloc -> tp_next */
+#endif
+ };
+ swigpypacked_type = tmp;
+ type_init = 1;
+#if PY_VERSION_HEX < 0x02020000
+ swigpypacked_type.ob_type = &PyType_Type;
+#else
+ if (PyType_Ready(&swigpypacked_type) < 0)
+ return NULL;
+#endif
+ }
+ return &swigpypacked_type;
+}
+
+SWIGRUNTIME PyObject *
+SwigPyPacked_New(void *ptr, size_t size, swig_type_info *ty)
+{
+ /* Create a SwigPyPacked holding a private copy of 'size' bytes from ptr.
+ Returns NULL if either the object or the byte buffer cannot be
+ allocated. */
+ SwigPyPacked *sobj = PyObject_NEW(SwigPyPacked, SwigPyPacked_type());
+ if (sobj) {
+ void *pack = malloc(size);
+ if (pack) {
+ memcpy(pack, ptr, size);
+ sobj->pack = pack;
+ sobj->ty = ty;
+ sobj->size = size;
+ } else {
+ /* buffer allocation failed: discard the half-built object */
+ PyObject_DEL((PyObject *) sobj);
+ sobj = 0;
+ }
+ }
+ return (PyObject *) sobj;
+}
+
+SWIGRUNTIME swig_type_info *
+SwigPyPacked_UnpackData(PyObject *obj, void *ptr, size_t size)
+{
+ /* Copy 'size' packed bytes out of obj into ptr and report the stored
+ type; NULL when obj is not a SwigPyPacked or the sizes disagree. */
+ SwigPyPacked *sobj;
+ if (!SwigPyPacked_Check(obj)) return 0;
+ sobj = (SwigPyPacked *)obj;
+ if (sobj->size != size) return 0;
+ memcpy(ptr, sobj->pack, size);
+ return sobj->ty;
+}
+
+/* -----------------------------------------------------------------------------
+ * pointers/data manipulation
+ * ----------------------------------------------------------------------------- */
+
+SWIGRUNTIMEINLINE PyObject *
+_SWIG_This(void)
+{
+ /* Build the interned-string key "this" used on proxy instances. */
+ return SWIG_Python_str_FromChar("this");
+}
+
+/* Cached "this" string; created on first use and kept for the process. */
+static PyObject *swig_this = NULL;
+
+SWIGRUNTIME PyObject *
+SWIG_This(void)
+{
+ if (swig_this == NULL)
+ swig_this = _SWIG_This();
+ return swig_this;
+}
+
+/* #define SWIG_PYTHON_SLOW_GETSET_THIS */
+
+/* TODO: I don't know how to implement the fast getset in Python 3 right now */
+/* Forces SWIG_Python_GetSwigThis to use the generic attribute lookup path. */
+#if PY_VERSION_HEX>=0x03000000
+#define SWIG_PYTHON_SLOW_GETSET_THIS
+#endif
+
+SWIGRUNTIME SwigPyObject *
+SWIG_Python_GetSwigThis(PyObject *pyobj)
+{
+ /* Resolve the SwigPyObject behind a proxy: either pyobj itself, its
+ weakref target, or its 'this' attribute (recursively). Returns a
+ borrowed pointer, or NULL/0 when none is found. */
+ PyObject *obj;
+
+ if (SwigPyObject_Check(pyobj))
+ return (SwigPyObject *) pyobj;
+
+#ifdef SWIGPYTHON_BUILTIN
+ (void)obj;
+# ifdef PyWeakref_CheckProxy
+ if (PyWeakref_CheckProxy(pyobj)) {
+ pyobj = PyWeakref_GET_OBJECT(pyobj);
+ if (pyobj && SwigPyObject_Check(pyobj))
+ return (SwigPyObject*) pyobj;
+ }
+# endif
+ return NULL;
+#else
+
+ obj = 0;
+
+#if (!defined(SWIG_PYTHON_SLOW_GETSET_THIS) && (PY_VERSION_HEX >= 0x02030000))
+ /* fast path: look 'this' up directly in the instance dict, bypassing
+ __getattr__ machinery */
+ if (PyInstance_Check(pyobj)) {
+ obj = _PyInstance_Lookup(pyobj, SWIG_This());
+ } else {
+ PyObject **dictptr = _PyObject_GetDictPtr(pyobj);
+ if (dictptr != NULL) {
+ PyObject *dict = *dictptr;
+ obj = dict ? PyDict_GetItem(dict, SWIG_This()) : 0;
+ } else {
+#ifdef PyWeakref_CheckProxy
+ if (PyWeakref_CheckProxy(pyobj)) {
+ PyObject *wobj = PyWeakref_GET_OBJECT(pyobj);
+ return wobj ? SWIG_Python_GetSwigThis(wobj) : 0;
+ }
+#endif
+ obj = PyObject_GetAttr(pyobj,SWIG_This());
+ if (obj) {
+ /* drop to a borrowed reference, as on the dict path */
+ Py_DECREF(obj);
+ } else {
+ if (PyErr_Occurred()) PyErr_Clear();
+ return 0;
+ }
+ }
+ }
+#else
+ /* slow path: ordinary attribute lookup */
+ obj = PyObject_GetAttr(pyobj,SWIG_This());
+ if (obj) {
+ Py_DECREF(obj);
+ } else {
+ if (PyErr_Occurred()) PyErr_Clear();
+ return 0;
+ }
+#endif
+ if (obj && !SwigPyObject_Check(obj)) {
+ /* a PyObject is called 'this', try to get the 'real this'
+ SwigPyObject from it */
+ return SWIG_Python_GetSwigThis(obj);
+ }
+ return (SwigPyObject *)obj;
+#endif
+}
+
+/* Acquire a pointer value */
+
+SWIGRUNTIME int
+SWIG_Python_AcquirePtr(PyObject *obj, int own) {
+ if (own == SWIG_POINTER_OWN) {
+ SwigPyObject *sobj = SWIG_Python_GetSwigThis(obj);
+ if (sobj) {
+ int oldown = sobj->own;
+ sobj->own = own;
+ return oldown;
+ }
+ }
+ return 0;
+}
+
+/* Convert a pointer value */
+
/* Convert Python object 'obj' to a C pointer of type 'ty'.
   - Py_None maps to a NULL pointer and SWIG_OK.
   - Otherwise the SwigPyObject chain behind 'obj' is walked until an entry
     matches 'ty' directly or through a registered cast.
   - With SWIG_POINTER_IMPLICIT_CONV in 'flags', a failed match falls back
     to calling the proxy class as a one-argument constructor.
   On success *ptr receives the (possibly cast) pointer and *own (if given)
   the ownership bits, possibly including SWIG_CAST_NEW_MEMORY when the cast
   allocated new memory the caller must free.  SWIG_POINTER_DISOWN strips
   Python-side ownership.  Returns a SWIG_OK-style result code. */
SWIGRUNTIME int
SWIG_Python_ConvertPtrAndOwn(PyObject *obj, void **ptr, swig_type_info *ty, int flags, int *own) {
  int res;
  SwigPyObject *sobj;

  if (!obj)
    return SWIG_ERROR;
  if (obj == Py_None) {
    if (ptr)
      *ptr = 0;
    return SWIG_OK;
  }

  res = SWIG_ERROR;

  sobj = SWIG_Python_GetSwigThis(obj);
  if (own)
    *own = 0;
  /* 'next' links chain additional SwigPyObjects attached to the same proxy
     (e.g. for multiple inheritance); scan for a type-compatible entry. */
  while (sobj) {
    void *vptr = sobj->ptr;
    if (ty) {
      swig_type_info *to = sobj->ty;
      if (to == ty) {
        /* no type cast needed */
        if (ptr) *ptr = vptr;
        break;
      } else {
        swig_cast_info *tc = SWIG_TypeCheck(to->name,ty);
        if (!tc) {
          sobj = (SwigPyObject *)sobj->next;
        } else {
          if (ptr) {
            int newmemory = 0;
            *ptr = SWIG_TypeCast(tc,vptr,&newmemory);
            if (newmemory == SWIG_CAST_NEW_MEMORY) {
              assert(own); /* badly formed typemap which will lead to a memory leak - it must set and use own to delete *ptr */
              if (own)
                *own = *own | SWIG_CAST_NEW_MEMORY;
            }
          }
          break;
        }
      }
    } else {
      /* No target type requested: accept the first pointer as-is. */
      if (ptr) *ptr = vptr;
      break;
    }
  }
  if (sobj) {
    if (own)
      *own = *own | sobj->own;
    if (flags & SWIG_POINTER_DISOWN) {
      sobj->own = 0;
    }
    res = SWIG_OK;
  } else {
    if (flags & SWIG_POINTER_IMPLICIT_CONV) {
      SwigPyClientData *data = ty ? (SwigPyClientData *) ty->clientdata : 0;
      if (data && !data->implicitconv) {
        PyObject *klass = data->klass;
        if (klass) {
          PyObject *impconv;
          data->implicitconv = 1; /* avoid recursion and call 'explicit' constructors*/
          impconv = SWIG_Python_CallFunctor(klass, obj);
          data->implicitconv = 0;
          if (PyErr_Occurred()) {
            /* A failed constructor call just means "no implicit conversion". */
            PyErr_Clear();
            impconv = 0;
          }
          if (impconv) {
            SwigPyObject *iobj = SWIG_Python_GetSwigThis(impconv);
            if (iobj) {
              void *vptr;
              res = SWIG_Python_ConvertPtrAndOwn((PyObject*)iobj, &vptr, ty, 0, 0);
              if (SWIG_IsOK(res)) {
                if (ptr) {
                  *ptr = vptr;
                  /* transfer the ownership to 'ptr' */
                  iobj->own = 0;
                  res = SWIG_AddCast(res);
                  res = SWIG_AddNewMask(res);
                } else {
                  res = SWIG_AddCast(res);
                }
              }
            }
            Py_DECREF(impconv);
          }
        }
      }
    }
  }
  return res;
}
+
+/* Convert a function ptr value */
+
/* Convert a Python object to a C function pointer of type 'ty'.
   Non-PyCFunction objects fall back to the ordinary pointer conversion.
   For wrapped C functions the raw pointer is recovered from the
   "swig_ptr: " marker SWIG embeds in the method docstring, then cast to
   'ty' through the registered cast table when necessary. */
SWIGRUNTIME int
SWIG_Python_ConvertFunctionPtr(PyObject *obj, void **ptr, swig_type_info *ty) {
  if (!PyCFunction_Check(obj)) {
    return SWIG_ConvertPtr(obj, ptr, ty, 0);
  } else {
    void *vptr = 0;

    /* here we get the method pointer for callbacks */
    const char *doc = (((PyCFunctionObject *)obj) -> m_ml -> ml_doc);
    const char *desc = doc ? strstr(doc, "swig_ptr: ") : 0;
    if (desc)
      /* skip the 10-char "swig_ptr: " prefix; desc becomes the type name */
      desc = ty ? SWIG_UnpackVoidPtr(desc + 10, &vptr, ty->name) : 0;
    if (!desc)
      return SWIG_ERROR;
    if (ty) {
      swig_cast_info *tc = SWIG_TypeCheck(desc,ty);
      if (tc) {
        int newmemory = 0;
        *ptr = SWIG_TypeCast(tc,vptr,&newmemory);
        assert(!newmemory); /* newmemory handling not yet implemented */
      } else {
        return SWIG_ERROR;
      }
    } else {
      *ptr = vptr;
    }
    return SWIG_OK;
  }
}
+
+/* Convert a packed value value */
+
+SWIGRUNTIME int
+SWIG_Python_ConvertPacked(PyObject *obj, void *ptr, size_t sz, swig_type_info *ty) {
+ swig_type_info *to = SwigPyPacked_UnpackData(obj, ptr, sz);
+ if (!to) return SWIG_ERROR;
+ if (ty) {
+ if (to != ty) {
+ /* check type cast? */
+ swig_cast_info *tc = SWIG_TypeCheck(to->name,ty);
+ if (!tc) return SWIG_ERROR;
+ }
+ }
+ return SWIG_OK;
+}
+
+/* -----------------------------------------------------------------------------
+ * Create a new pointer object
+ * ----------------------------------------------------------------------------- */
+
+/*
+ Create a new instance object, without calling __init__, and set the
+ 'this' attribute.
+*/
+
/* Allocate a shadow/proxy instance without running __init__ and attach the
   given SwigPyObject under its 'this' attribute.  The construction strategy
   depends on the Python version and on whether the proxy class cached a
   usable __new__ (data->newraw).  Returns a new reference, or NULL/0 on
   allocation failure. */
SWIGRUNTIME PyObject*
SWIG_Python_NewShadowInstance(SwigPyClientData *data, PyObject *swig_this)
{
#if (PY_VERSION_HEX >= 0x02020000)
  PyObject *inst = 0;
  PyObject *newraw = data->newraw;
  if (newraw) {
    /* New-style class: allocate through the saved __new__ (never __init__). */
    inst = PyObject_Call(newraw, data->newargs, NULL);
    if (inst) {
#if !defined(SWIG_PYTHON_SLOW_GETSET_THIS)
      PyObject **dictptr = _PyObject_GetDictPtr(inst);
      if (dictptr != NULL) {
        PyObject *dict = *dictptr;
        if (dict == NULL) {
          dict = PyDict_New();
          *dictptr = dict;
          PyDict_SetItem(dict, SWIG_This(), swig_this);
        }
        /* NOTE(review): when the instance dict already exists, 'this' is not
           stored on this fast path -- presumably __new__ always produces a
           dict-less instance here; confirm against upstream SWIG. */
      }
#else
      PyObject *key = SWIG_This();
      PyObject_SetAttr(inst, key, swig_this);
#endif
    }
  } else {
#if PY_VERSION_HEX >= 0x03000000
    /* Python 3 without a cached __new__: raw tp_new on the proxy type. */
    inst = PyBaseObject_Type.tp_new((PyTypeObject*) data->newargs, Py_None, Py_None);
    if (inst) {
      PyObject_SetAttr(inst, SWIG_This(), swig_this);
      /* clear the valid-version-tag so cached attribute lookups refresh */
      Py_TYPE(inst)->tp_flags &= ~Py_TPFLAGS_VALID_VERSION_TAG;
    }
#else
    /* Old-style class: build the instance dict up front. */
    PyObject *dict = PyDict_New();
    if (dict) {
      PyDict_SetItem(dict, SWIG_This(), swig_this);
      inst = PyInstance_NewRaw(data->newargs, dict);
      Py_DECREF(dict);
    }
#endif
  }
  return inst;
#else
#if (PY_VERSION_HEX >= 0x02010000)
  /* Python 2.1.x: PyInstance_NewRaw with a pre-populated dict. */
  PyObject *inst = 0;
  PyObject *dict = PyDict_New();
  if (dict) {
    PyDict_SetItem(dict, SWIG_This(), swig_this);
    inst = PyInstance_NewRaw(data->newargs, dict);
    Py_DECREF(dict);
  }
  return (PyObject *) inst;
#else
  /* Pre-2.1: construct the PyInstanceObject by hand. */
  PyInstanceObject *inst = PyObject_NEW(PyInstanceObject, &PyInstance_Type);
  if (inst == NULL) {
    return NULL;
  }
  inst->in_class = (PyClassObject *)data->newargs;
  Py_INCREF(inst->in_class);
  inst->in_dict = PyDict_New();
  if (inst->in_dict == NULL) {
    Py_DECREF(inst);
    return NULL;
  }
#ifdef Py_TPFLAGS_HAVE_WEAKREFS
  inst->in_weakreflist = NULL;
#endif
#ifdef Py_TPFLAGS_GC
  PyObject_GC_Init(inst);
#endif
  PyDict_SetItem(inst->in_dict, SWIG_This(), swig_this);
  return (PyObject *) inst;
#endif
#endif
}
+
/* Store 'swig_this' under the 'this' attribute of instance 'inst', writing
   directly into the instance dict when the fast path is available. */
SWIGRUNTIME void
SWIG_Python_SetSwigThis(PyObject *inst, PyObject *swig_this)
{
  PyObject *dict;
#if (PY_VERSION_HEX >= 0x02020000) && !defined(SWIG_PYTHON_SLOW_GETSET_THIS)
  PyObject **dictptr = _PyObject_GetDictPtr(inst);
  if (dictptr != NULL) {
    dict = *dictptr;
    if (dict == NULL) {
      /* Instance has no dict yet; create and install one. */
      dict = PyDict_New();
      *dictptr = dict;
    }
    PyDict_SetItem(dict, SWIG_This(), swig_this);
    return;
  }
#endif
  /* Fallback: fetch __dict__ through the attribute protocol.
     NOTE(review): the result is not checked for NULL; presumably every proxy
     instance reaching here has a __dict__ -- confirm. */
  dict = PyObject_GetAttrString(inst, (char*)"__dict__");
  PyDict_SetItem(dict, SWIG_This(), swig_this);
  Py_DECREF(dict);
}
+
+
+SWIGINTERN PyObject *
+SWIG_Python_InitShadowInstance(PyObject *args) {
+ PyObject *obj[2];
+ if (!SWIG_Python_UnpackTuple(args, "swiginit", 2, 2, obj)) {
+ return NULL;
+ } else {
+ SwigPyObject *sthis = SWIG_Python_GetSwigThis(obj[0]);
+ if (sthis) {
+ SwigPyObject_append((PyObject*) sthis, obj[1]);
+ } else {
+ SWIG_Python_SetSwigThis(obj[0], obj[1]);
+ }
+ return SWIG_Py_Void();
+ }
+}
+
+/* Create a new pointer object */
+
/* Wrap the C pointer 'ptr' of type 'type' in a Python object; NULL maps to
   None.  When the type registered a builtin pytype, a bare SwigPyObject is
   produced (or, for SWIG_BUILTIN_TP_INIT, spliced into 'self's chain);
   otherwise a SwigPyObject is created and, unless SWIG_POINTER_NOSHADOW is
   set, wrapped in a shadow/proxy instance.  SWIG_POINTER_OWN in 'flags'
   transfers ownership of 'ptr' to Python. */
SWIGRUNTIME PyObject *
SWIG_Python_NewPointerObj(PyObject *self, void *ptr, swig_type_info *type, int flags) {
  SwigPyClientData *clientdata;
  PyObject * robj;
  int own;

  if (!ptr)
    return SWIG_Py_Void();

  clientdata = type ? (SwigPyClientData *)(type->clientdata) : 0;
  own = (flags & SWIG_POINTER_OWN) ? SWIG_POINTER_OWN : 0;
  if (clientdata && clientdata->pytype) {
    SwigPyObject *newobj;
    if (flags & SWIG_BUILTIN_TP_INIT) {
      /* Re-initialising an existing builtin instance: reuse it, or append a
         freshly allocated link when it already holds a pointer. */
      newobj = (SwigPyObject*) self;
      if (newobj->ptr) {
        PyObject *next_self = clientdata->pytype->tp_alloc(clientdata->pytype, 0);
        while (newobj->next)
          newobj = (SwigPyObject *) newobj->next;
        newobj->next = next_self;
        newobj = (SwigPyObject *)next_self;
      }
    } else {
      newobj = PyObject_New(SwigPyObject, clientdata->pytype);
    }
    if (newobj) {
      newobj->ptr = ptr;
      newobj->ty = type;
      newobj->own = own;
      newobj->next = 0;
#ifdef SWIGPYTHON_BUILTIN
      newobj->dict = 0;
#endif
      return (PyObject*) newobj;
    }
    return SWIG_Py_Void();
  }

  /* SWIG_BUILTIN_TP_INIT only makes sense with a registered pytype. */
  assert(!(flags & SWIG_BUILTIN_TP_INIT));

  robj = SwigPyObject_New(ptr, type, own);
  if (robj && clientdata && !(flags & SWIG_POINTER_NOSHADOW)) {
    PyObject *inst = SWIG_Python_NewShadowInstance(clientdata, robj);
    Py_DECREF(robj);
    robj = inst;
  }
  return robj;
}
+
+/* Create a new packed object */
+
/* Wrap 'sz' bytes at 'ptr' in a SwigPyPacked object; NULL maps to None. */
SWIGRUNTIMEINLINE PyObject *
SWIG_Python_NewPackedObj(void *ptr, size_t sz, swig_type_info *type) {
  return ptr ? SwigPyPacked_New((void *) ptr, sz, type) : SWIG_Py_Void();
}
+
+/* -----------------------------------------------------------------------------*
+ * Get type list
+ * -----------------------------------------------------------------------------*/
+
+#ifdef SWIG_LINK_RUNTIME
+void *SWIG_ReturnGlobalTypeList(void *);
+#endif
+
/* Return the process-wide swig_module_info type list, importing it from the
   "swig_runtime_data" capsule/CObject published by SWIG_Python_SetModule
   (or from the statically linked runtime).  The pointer is cached; NULL is
   returned when no module has been registered yet. */
SWIGRUNTIME swig_module_info *
SWIG_Python_GetModule(void *SWIGUNUSEDPARM(clientdata)) {
  static void *type_pointer = (void *)0;
  /* first check if module already created */
  if (!type_pointer) {
#ifdef SWIG_LINK_RUNTIME
    type_pointer = SWIG_ReturnGlobalTypeList((void *)0);
#else
# ifdef SWIGPY_USE_CAPSULE
    type_pointer = PyCapsule_Import(SWIGPY_CAPSULE_NAME, 0);
# else
    type_pointer = PyCObject_Import((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION,
                                    (char*)"type_pointer" SWIG_TYPE_TABLE_NAME);
# endif
    /* A failed import just means "not present yet", not an error. */
    if (PyErr_Occurred()) {
      PyErr_Clear();
      type_pointer = (void *)0;
    }
#endif
  }
  return (swig_module_info *) type_pointer;
}
+
#if PY_MAJOR_VERSION < 2
/* PyModule_AddObject function was introduced in Python 2.0. The following function
   is copied out of Python/modsupport.c in python version 2.3.4 */
/* Add object 'o' to module 'm' under 'name'; steals a reference to 'o' on
   success.  Returns SWIG_OK / SWIG_ERROR (numerically the same 0 / -1 the
   real CPython function returns). */
SWIGINTERN int
PyModule_AddObject(PyObject *m, char *name, PyObject *o)
{
  PyObject *dict;
  if (!PyModule_Check(m)) {
    PyErr_SetString(PyExc_TypeError,
                    "PyModule_AddObject() needs module as first arg");
    return SWIG_ERROR;
  }
  if (!o) {
    PyErr_SetString(PyExc_TypeError,
                    "PyModule_AddObject() needs non-NULL value");
    return SWIG_ERROR;
  }

  dict = PyModule_GetDict(m);
  if (dict == NULL) {
    /* Internal error -- modules must have a dict! */
    PyErr_Format(PyExc_SystemError, "module '%s' has no __dict__",
                 PyModule_GetName(m));
    return SWIG_ERROR;
  }
  if (PyDict_SetItemString(dict, name, o))
    return SWIG_ERROR;
  /* The dict now holds its own reference; drop the one we were given. */
  Py_DECREF(o);
  return SWIG_OK;
}
#endif
+
/* Destructor installed on the runtime-data capsule/CObject: free the client
   data attached to every owned entry of the module's type table, then drop
   the cached "this" string created by SWIG_This(). */
SWIGRUNTIME void
#ifdef SWIGPY_USE_CAPSULE
SWIG_Python_DestroyModule(PyObject *obj)
#else
SWIG_Python_DestroyModule(void *vptr)
#endif
{
#ifdef SWIGPY_USE_CAPSULE
  swig_module_info *swig_module = (swig_module_info *) PyCapsule_GetPointer(obj, SWIGPY_CAPSULE_NAME);
#else
  swig_module_info *swig_module = (swig_module_info *) vptr;
#endif
  swig_type_info **types = swig_module->types;
  size_t i;
  for (i =0; i < swig_module->size; ++i) {
    swig_type_info *ty = types[i];
    if (ty->owndata) {
      SwigPyClientData *data = (SwigPyClientData *) ty->clientdata;
      if (data) SwigPyClientData_Del(data);
    }
  }
  /* Matches the reference created lazily by SWIG_This(). */
  Py_DECREF(SWIG_This());
  swig_this = NULL;
}
+
/* Publish the module's type table to other SWIG-generated modules in this
   process: store 'swig_module' as a capsule/CObject attribute of a dummy
   "swig_runtime_data<version>" module so SWIG_Python_GetModule can import it. */
SWIGRUNTIME void
SWIG_Python_SetModule(swig_module_info *swig_module) {
#if PY_VERSION_HEX >= 0x03000000
  /* Add a dummy module object into sys.modules */
  PyObject *module = PyImport_AddModule((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION);
#else
  static PyMethodDef swig_empty_runtime_method_table[] = { {NULL, NULL, 0, NULL} }; /* Sentinel */
  PyObject *module = Py_InitModule((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION, swig_empty_runtime_method_table);
#endif
#ifdef SWIGPY_USE_CAPSULE
  PyObject *pointer = PyCapsule_New((void *) swig_module, SWIGPY_CAPSULE_NAME, SWIG_Python_DestroyModule);
  if (pointer && module) {
    /* PyModule_AddObject steals the reference to 'pointer' on success. */
    PyModule_AddObject(module, (char*)"type_pointer_capsule" SWIG_TYPE_TABLE_NAME, pointer);
  } else {
    Py_XDECREF(pointer);
  }
#else
  PyObject *pointer = PyCObject_FromVoidPtr((void *) swig_module, SWIG_Python_DestroyModule);
  if (pointer && module) {
    PyModule_AddObject(module, (char*)"type_pointer" SWIG_TYPE_TABLE_NAME, pointer);
  } else {
    Py_XDECREF(pointer);
  }
#endif
}
+
+/* The python cached type query */
/* Return the process-wide dict caching type-name -> descriptor lookups made
   by SWIG_Python_TypeQuery; created once and held for the lifetime of the
   process (SWIG_STATIC_POINTER). */
SWIGRUNTIME PyObject *
SWIG_Python_TypeCache(void) {
  static PyObject *SWIG_STATIC_POINTER(cache) = PyDict_New();
  return cache;
}
+
/* Resolve type name 'type' to its swig_type_info, consulting (and filling)
   the cache dict from SWIG_Python_TypeCache, where descriptors are stored
   wrapped in capsules/CObjects.  Returns NULL when no registered module
   knows the type. */
SWIGRUNTIME swig_type_info *
SWIG_Python_TypeQuery(const char *type)
{
  PyObject *cache = SWIG_Python_TypeCache();
  PyObject *key = SWIG_Python_str_FromChar(type);
  PyObject *obj = PyDict_GetItem(cache, key); /* borrowed reference */
  swig_type_info *descriptor;
  if (obj) {
#ifdef SWIGPY_USE_CAPSULE
    descriptor = (swig_type_info *) PyCapsule_GetPointer(obj, NULL);
#else
    descriptor = (swig_type_info *) PyCObject_AsVoidPtr(obj);
#endif
  } else {
    /* Cache miss: consult the runtime type table, then memoize any hit. */
    swig_module_info *swig_module = SWIG_GetModule(0);
    descriptor = SWIG_TypeQueryModule(swig_module, swig_module, type);
    if (descriptor) {
#ifdef SWIGPY_USE_CAPSULE
      obj = PyCapsule_New((void*) descriptor, NULL, NULL);
#else
      obj = PyCObject_FromVoidPtr(descriptor, NULL);
#endif
      PyDict_SetItem(cache, key, obj);
      Py_DECREF(obj);
    }
  }
  Py_DECREF(key);
  return descriptor;
}
+
+/*
+ For backward compatibility only
+*/
+#define SWIG_POINTER_EXCEPTION 0
+#define SWIG_arg_fail(arg) SWIG_Python_ArgFail(arg)
+#define SWIG_MustGetPtr(p, type, argnum, flags) SWIG_Python_MustGetPtr(p, type, argnum, flags)
+
/* If an exception is pending, re-raise it with 'mesg' prepended
   (infront != 0) or appended to its string form.  Returns 1 when a message
   was added, 0 when no exception was set. */
SWIGRUNTIME int
SWIG_Python_AddErrMesg(const char* mesg, int infront)
{
  if (PyErr_Occurred()) {
    PyObject *type = 0;
    PyObject *value = 0;
    PyObject *traceback = 0;
    /* Take ownership of the pending exception so it can be rebuilt. */
    PyErr_Fetch(&type, &value, &traceback);
    if (value) {
      char *tmp;
      PyObject *old_str = PyObject_Str(value);
      /* keep our own reference to the type for PyErr_Format below */
      Py_XINCREF(type);
      PyErr_Clear();
      if (infront) {
        PyErr_Format(type, "%s %s", mesg, tmp = SWIG_Python_str_AsChar(old_str));
      } else {
        PyErr_Format(type, "%s %s", tmp = SWIG_Python_str_AsChar(old_str), mesg);
      }
      SWIG_Python_str_DelForPy3(tmp);
      Py_DECREF(old_str);
    }
    return 1;
  } else {
    return 0;
  }
}
+
+SWIGRUNTIME int
+SWIG_Python_ArgFail(int argnum)
+{
+ if (PyErr_Occurred()) {
+ /* add information about failing argument */
+ char mesg[256];
+ PyOS_snprintf(mesg, sizeof(mesg), "argument number %d:", argnum);
+ return SWIG_Python_AddErrMesg(mesg, 1);
+ } else {
+ return 0;
+ }
+}
+
+SWIGRUNTIMEINLINE const char *
+SwigPyObject_GetDesc(PyObject *self)
+{
+ SwigPyObject *v = (SwigPyObject *)self;
+ swig_type_info *ty = v ? v->ty : 0;
+ return ty ? ty->str : "";
+}
+
/* Raise a TypeError saying a value of type 'type' was expected, including
   the received object's type name (and, for SwigPyObjects, the wrapped C
   type description) whenever they can be determined. */
SWIGRUNTIME void
SWIG_Python_TypeError(const char *type, PyObject *obj)
{
  if (type) {
#if defined(SWIG_COBJECT_TYPES)
    if (obj && SwigPyObject_Check(obj)) {
      const char *otype = (const char *) SwigPyObject_GetDesc(obj);
      if (otype) {
        PyErr_Format(PyExc_TypeError, "a '%s' is expected, 'SwigPyObject(%s)' is received",
                     type, otype);
        return;
      }
    } else
#endif
    {
      const char *otype = (obj ? obj->ob_type->tp_name : 0);
      if (otype) {
        /* Include the object's str() representation when obtainable. */
        PyObject *str = PyObject_Str(obj);
        const char *cstr = str ? SWIG_Python_str_AsChar(str) : 0;
        if (cstr) {
          PyErr_Format(PyExc_TypeError, "a '%s' is expected, '%s(%s)' is received",
                       type, otype, cstr);
          SWIG_Python_str_DelForPy3(cstr);
        } else {
          PyErr_Format(PyExc_TypeError, "a '%s' is expected, '%s' is received",
                       type, otype);
        }
        Py_XDECREF(str);
        return;
      }
    }
    PyErr_Format(PyExc_TypeError, "a '%s' is expected", type);
  } else {
    PyErr_Format(PyExc_TypeError, "unexpected type is received");
  }
}
+
+
/* Convert a pointer value; on a type mismatch, clear the pending error and
   (when SWIG_POINTER_EXCEPTION is enabled) raise a descriptive TypeError. */
SWIGRUNTIME void *
SWIG_Python_MustGetPtr(PyObject *obj, swig_type_info *ty, int SWIGUNUSEDPARM(argnum), int flags) {
  void *result;
  if (SWIG_Python_ConvertPtr(obj, &result, ty, flags) == -1) {
    /* NOTE(review): on failure 'result' may be returned uninitialized --
       ConvertPtrAndOwn only writes *ptr on success or for None; presumably
       callers treat the value as unusable after a failed conversion. */
    PyErr_Clear();
#if SWIG_POINTER_EXCEPTION
    if (flags) {
      SWIG_Python_TypeError(SWIG_TypePrettyName(ty), obj);
      SWIG_Python_ArgFail(argnum);
    }
#endif
  }
  return result;
}
+
#ifdef SWIGPYTHON_BUILTIN
/* setattr slot for "non-dynamic" builtin SWIG types: assignment is allowed
   only through a data descriptor (tp_descr_set) found on the type; any
   other name raises AttributeError, so new instance attributes cannot be
   created.  Returns 0 on success, -1 on error. */
SWIGRUNTIME int
SWIG_Python_NonDynamicSetAttr(PyObject *obj, PyObject *name, PyObject *value) {
  PyTypeObject *tp = obj->ob_type;
  PyObject *descr;
  PyObject *encoded_name;
  descrsetfunc f;
  int res = -1; /* fix: was uninitialized when PyType_Ready failed below,
                   making 'goto done' return an indeterminate value */

# ifdef Py_USING_UNICODE
  if (PyString_Check(name)) {
    name = PyUnicode_Decode(PyString_AsString(name), PyString_Size(name), NULL, NULL);
    if (!name)
      return -1;
  } else if (!PyUnicode_Check(name))
# else
  if (!PyString_Check(name))
# endif
  {
    PyErr_Format(PyExc_TypeError, "attribute name must be string, not '%.200s'", name->ob_type->tp_name);
    return -1;
  } else {
    /* We own 'name' from here on (decoded above or INCREF'd here). */
    Py_INCREF(name);
  }

  if (!tp->tp_dict) {
    /* Lazily initialize the type; 'res' is already -1 if this fails. */
    if (PyType_Ready(tp) < 0)
      goto done;
  }

  descr = _PyType_Lookup(tp, name);
  f = NULL;
  if (descr != NULL)
    f = descr->ob_type->tp_descr_set;
  if (!f) {
    /* No data descriptor on the type: refuse to create a new attribute. */
    if (PyString_Check(name)) {
      encoded_name = name;
      Py_INCREF(name);
    } else {
      /* NOTE(review): PyUnicode_AsUTF8String may return NULL on encoding
         failure, which would crash PyString_AsString below -- presumably
         unreachable for attribute names; confirm. */
      encoded_name = PyUnicode_AsUTF8String(name);
    }
    PyErr_Format(PyExc_AttributeError, "'%.100s' object has no attribute '%.200s'", tp->tp_name, PyString_AsString(encoded_name));
    Py_DECREF(encoded_name);
  } else {
    res = f(descr, obj, value);
  }

  done:
  Py_DECREF(name);
  return res;
}
#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#define SWIG_exception_fail(code, msg) do { SWIG_Error(code, msg); SWIG_fail; } while(0)
+
+#define SWIG_contract_assert(expr, msg) if (!(expr)) { SWIG_Error(SWIG_RuntimeError, msg); SWIG_fail; } else
+
+
+/* -----------------------------------------------------------------------------
+ * director.swg
+ *
+ * This file contains support for director classes that proxy
+ * method calls from C++ to Python extensions.
+ * ----------------------------------------------------------------------------- */
+
+#ifndef SWIG_DIRECTOR_PYTHON_HEADER_
+#define SWIG_DIRECTOR_PYTHON_HEADER_
+
+#ifdef __cplusplus
+
+#include
+#include
+#include
+#include
+#include