<!DOCTYPE HTML>
<html lang="en" class="sidebar-visible no-js coal">
    <head>
        <!-- Book generated using mdBook -->
        <meta charset="UTF-8">
        <title>How to create YOLOv8-based object detection web service using Python, Julia, Node.js, JavaScript, Go and Rust - Andrew&#x27;s Blog</title>


        <!-- Custom HTML head -->
        
        <meta name="description" content="Andrew Ryan&#x27;s Blog">
        <meta name="viewport" content="width=device-width, initial-scale=1">
        <meta name="theme-color" content="#ffffff">

        <link rel="icon" href="../../favicon.svg">
        <link rel="shortcut icon" href="../../favicon.png">
        <link rel="stylesheet" href="../../css/variables.css">
        <link rel="stylesheet" href="../../css/general.css">
        <link rel="stylesheet" href="../../css/chrome.css">

        <!-- Fonts -->
        <link rel="stylesheet" href="../../FontAwesome/css/font-awesome.css">
        <link rel="stylesheet" href="../../fonts/fonts.css">

        <!-- Highlight.js Stylesheets -->
        <link rel="stylesheet" href="../../highlight.css">
        <link rel="stylesheet" href="../../tomorrow-night.css">
        <link rel="stylesheet" href="../../ayu-highlight.css">

        <!-- Custom theme stylesheets -->
        <link rel="stylesheet" href="../../src/style/custom.css">

        <!-- MathJax -->
        <script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
    </head>
    <body>
    <div id="body-container">
        <!-- Provide site root to javascript -->
        <script>
            // Site root relative to this page; used by the mdBook scripts below
            // to build URLs for assets and navigation.
            var path_to_root = "../../";
            // Both branches of the generated prefers-color-scheme ternary were
            // "coal", so the matchMedia query was dead code; use the constant.
            var default_theme = "coal";
        </script>

        <!-- Work around some values being stored in localStorage wrapped in quotes -->
        <script>
            // Older mdBook versions stored these values JSON-stringified
            // (wrapped in double quotes); strip the quotes so later reads get
            // the bare value. Each key is guarded against a null getItem()
            // result so a missing 'mdbook-theme' no longer aborts the
            // 'mdbook-sidebar' fix-up with a TypeError inside the shared try.
            try {
                var theme = localStorage.getItem('mdbook-theme');
                var sidebar = localStorage.getItem('mdbook-sidebar');

                if (theme && theme.startsWith('"') && theme.endsWith('"')) {
                    localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
                }

                if (sidebar && sidebar.startsWith('"') && sidebar.endsWith('"')) {
                    localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
                }
            } catch (e) { }
        </script>

        <!-- Set the theme before any content is loaded, prevents flash -->
        <script>
            // Resolve and apply the theme before any content renders to
            // prevent a flash of the wrong colors. Falls back to
            // default_theme when nothing is stored or localStorage is
            // unavailable (e.g. blocked by privacy settings).
            var theme;
            try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
            if (theme === null || theme === undefined) { theme = default_theme; }
            var html = document.querySelector('html');
            // Swap the server-rendered fallback classes ("no-js" and the
            // build-default "coal") for the resolved theme plus a "js" marker.
            // Semicolons added for consistency with the rest of the file.
            html.classList.remove('no-js');
            html.classList.remove('coal');
            html.classList.add(theme);
            html.classList.add('js');
        </script>

        <!-- Hide / unhide sidebar before it is displayed -->
        <script>
            // Decide the sidebar state before first paint: narrow viewports
            // always start hidden; wide ones (>= 1080px) honor the stored
            // preference, defaulting to visible.
            var html = document.querySelector('html');
            var sidebar = 'hidden';
            if (document.body.clientWidth >= 1080) {
                var stored = null;
                try { stored = localStorage.getItem('mdbook-sidebar'); } catch (e) { }
                sidebar = stored || 'visible';
            }
            html.classList.remove('sidebar-visible');
            html.classList.add('sidebar-' + sidebar);
        </script>

        <nav id="sidebar" class="sidebar" aria-label="Table of contents">
            <div class="sidebar-scrollbox">
                <ol class="chapter"><li class="chapter-item affix "><a href="../../index.html">Andrew's Blog</a></li><li class="chapter-item "><a href="../../posts/linux/linux.html"><strong aria-hidden="true">1.</strong> Linux</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/linux/install_linux.html"><strong aria-hidden="true">1.1.</strong> install linux</a></li><li class="chapter-item "><a href="../../posts/linux/bash_profile.html"><strong aria-hidden="true">1.2.</strong> bash profile</a></li><li class="chapter-item "><a href="../../posts/linux/command_list.html"><strong aria-hidden="true">1.3.</strong> command list</a></li><li class="chapter-item "><a href="../../posts/linux/git_guide.html"><strong aria-hidden="true">1.4.</strong> git guide</a></li><li class="chapter-item "><a href="../../posts/linux/tar.html"><strong aria-hidden="true">1.5.</strong> tar</a></li><li class="chapter-item "><a href="../../posts/Linux/git_cheatsheet.html"><strong aria-hidden="true">1.6.</strong> Git Cheatsheet</a></li><li class="chapter-item "><a href="../../posts/Linux/bash_cheatsheet.html"><strong aria-hidden="true">1.7.</strong> Bash Cheatsheet</a></li></ol></li><li class="chapter-item "><a href="../../posts/macos/mac.html"><strong aria-hidden="true">2.</strong> MacOS</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/macos/macos_profiles.html"><strong aria-hidden="true">2.1.</strong> macos profiles</a></li><li class="chapter-item "><a href="../../posts/macos/macos_pwn_env_setup.html"><strong aria-hidden="true">2.2.</strong> macos pwn env setup</a></li></ol></li><li class="chapter-item "><a href="../../posts/swift/swift.html"><strong aria-hidden="true">3.</strong> Swift</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/swift/learn_swift.html"><strong aria-hidden="true">3.1.</strong> learn swift 
basics</a></li><li class="chapter-item "><a href="../../posts/swift/swift_extensions.html"><strong aria-hidden="true">3.2.</strong> Swift extensions</a></li><li class="chapter-item "><a href="../../posts/swift/swiftui_extension.html"><strong aria-hidden="true">3.3.</strong> SwiftUI extensions</a></li><li class="chapter-item "><a href="../../posts/swift/install_swift.html"><strong aria-hidden="true">3.4.</strong> install swift</a></li><li class="chapter-item "><a href="../../posts/swift/task_planner.html"><strong aria-hidden="true">3.5.</strong> implment task panner app with SwiftUI</a></li><li class="chapter-item "><a href="../../posts/swift/swift_cheat_sheet.html"><strong aria-hidden="true">3.6.</strong> Swift Cheat Sheet</a></li><li class="chapter-item "><a href="../../posts/swift/yinci_url.html"><strong aria-hidden="true">3.7.</strong> Personal privacy protocol</a></li><li class="chapter-item "><a href="../../posts/swift/swift_regular_exressions.html"><strong aria-hidden="true">3.8.</strong> Swift regular exressions</a></li><li class="chapter-item "><a href="../../posts/ios/how_to_create_beautiful_ios_charts_in_swift.html"><strong aria-hidden="true">3.9.</strong> How to Create Beautiful iOS Charts in Swift</a></li><li class="chapter-item "><a href="../../posts/swift/swiftui_source_code.html"><strong aria-hidden="true">3.10.</strong> SwiftUI source code</a></li><li class="chapter-item "><a href="../../posts/swift/use_swift_fetch_iciba_api.html"><strong aria-hidden="true">3.11.</strong> use swift fetch iciba API</a></li></ol></li><li class="chapter-item "><a href="../../posts/ios/ios.html"><strong aria-hidden="true">4.</strong> iOS</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/ios/cocaposd_setup_and_install_for_ios_project.html"><strong aria-hidden="true">4.1.</strong> cocaposd setup and install for ios project</a></li><li class="chapter-item "><a 
href="../../posts/ios/swiftui_show_gif_image.html"><strong aria-hidden="true">4.2.</strong> SwiftUI show gif image</a></li><li class="chapter-item "><a href="../../posts/ios/implement_task_planner_app.html"><strong aria-hidden="true">4.3.</strong> implement Task planner App</a></li></ol></li><li class="chapter-item "><a href="../../posts/objective_c/objective_c.html"><strong aria-hidden="true">5.</strong> Objective-C</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/objective_c/objective_c_cheat_sheet.html"><strong aria-hidden="true">5.1.</strong> Objective-C Cheat Sheet</a></li><li class="chapter-item "><a href="../../posts/objective_c/objective_c_for_absolute_beginners_read_note.html"><strong aria-hidden="true">5.2.</strong> Objective-C Note</a></li></ol></li><li class="chapter-item "><a href="../../posts/dart/dart.html"><strong aria-hidden="true">6.</strong> Dart</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/dart/flutter.html"><strong aria-hidden="true">6.1.</strong> Flutter Cheat Sheet</a></li><li class="chapter-item "><a href="../../posts/dart/dart_cheat_sheet.html"><strong aria-hidden="true">6.2.</strong> Dart Cheat Sheet</a></li><li class="chapter-item "><a href="../../posts/flutter/flutter_dev_test.html"><strong aria-hidden="true">6.3.</strong> Flutter dev test</a></li></ol></li><li class="chapter-item expanded "><a href="../../posts/rust/rust.html"><strong aria-hidden="true">7.</strong> Rust</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/rust/offline_use_rust.html"><strong aria-hidden="true">7.1.</strong> Offline use rust</a></li><li class="chapter-item "><a href="../../posts/rust/rust_grammer.html"><strong aria-hidden="true">7.2.</strong> rust grammar</a></li><li class="chapter-item "><a href="../../posts/rust/pase_string_and_decimal_conversion.html"><strong 
aria-hidden="true">7.3.</strong> pase string and decimal conversion</a></li><li class="chapter-item "><a href="../../posts/rust/parse_types.html"><strong aria-hidden="true">7.4.</strong> rust types</a></li><li class="chapter-item "><a href="../../posts/rust/rust_life_cycle.html"><strong aria-hidden="true">7.5.</strong> Rust life cycle</a></li><li class="chapter-item "><a href="../../posts/rust/rust_generic.html"><strong aria-hidden="true">7.6.</strong> rust generics</a></li><li class="chapter-item "><a href="../../posts/rust/rust_implment_matrix.html"><strong aria-hidden="true">7.7.</strong> Rust implement matrix</a></li><li class="chapter-item "><a href="../../posts/rust/rust_sort.html"><strong aria-hidden="true">7.8.</strong> Rust implement sort algorithms</a></li><li class="chapter-item "><a href="../../posts/rust/implement_aes_encryption.html"><strong aria-hidden="true">7.9.</strong> Rust implement AEC encryption and decryption</a></li><li class="chapter-item "><a href="../../posts/rust/implement_trie_data_structure.html"><strong aria-hidden="true">7.10.</strong> implement trie data structure</a></li><li class="chapter-item "><a href="../../posts/rust/rust_implement_tree.html"><strong aria-hidden="true">7.11.</strong> implement tree data_structure</a></li><li class="chapter-item "><a href="../../posts/rust/list_dir.html"><strong aria-hidden="true">7.12.</strong> list dir</a></li><li class="chapter-item "><a href="../../posts/rust/fast_way_to_implment_object_trait.html"><strong aria-hidden="true">7.13.</strong> fast way to implment object trait</a></li><li class="chapter-item "><a href="../../posts/rust/compress_rust_binary_size.html"><strong aria-hidden="true">7.14.</strong> compress rust binary size</a></li><li class="chapter-item "><a href="../../posts/rust/implment_file_upload_backend.html"><strong aria-hidden="true">7.15.</strong> impliment file upload</a></li><li class="chapter-item "><a 
href="../../posts/rust/this_is_add_post_cli_implementation_in_rust.html"><strong aria-hidden="true">7.16.</strong> this is add_post cli implementation in rust</a></li><li class="chapter-item "><a href="../../posts/rust/use_rust_implment_a_copyclipbord_cli.html"><strong aria-hidden="true">7.17.</strong> Use rust implment a copyclipbord CLI</a></li><li class="chapter-item "><a href="../../posts/rust/sqlite_database_add_delete_update_show_in_rust.html"><strong aria-hidden="true">7.18.</strong> sqlite database add delete update show in rust</a></li><li class="chapter-item "><a href="../../posts/rust/implementing_tokio_joinhandle_for_wasm.html"><strong aria-hidden="true">7.19.</strong> Implementing tokio JoinHandle for wasm</a></li><li class="chapter-item "><a href="../../posts/rust/rust_implement_a_crate_for_encode_and_decode_brainfuck_and_ook.html"><strong aria-hidden="true">7.20.</strong> rust implement a crate for encode and decode brainfuck and ook</a></li><li class="chapter-item "><a href="../../posts/rust/slint_builtin_elements.html"><strong aria-hidden="true">7.21.</strong> Slint Builtin Elements</a></li><li class="chapter-item "><a href="../../posts/rust/corporate_network_install_rust_on_windows.html"><strong aria-hidden="true">7.22.</strong> Corporate network install Rust on windows</a></li><li class="chapter-item "><a href="../../posts/rust/rust_binary_file_how_to_judge_static_link_or_dynamic_link_in_macos.html"><strong aria-hidden="true">7.23.</strong> rust binary file how to judge static link or dynamic link in Macos</a></li><li class="chapter-item "><a href="../../posts/rust/rust_binary_include_dir_and_get_contents.html"><strong aria-hidden="true">7.24.</strong> rust binary include dir and get contents</a></li><li class="chapter-item expanded "><a href="../../posts/rust/how_to_create_yolov8_based_object_detection_web_service_using_python,_julia,_node.js,_javascript,_go_and_rust.html" class="active"><strong aria-hidden="true">7.25.</strong> How to create 
YOLOv8-based object detection web service using Python, Julia, Node.js, JavaScript, Go and Rust</a></li><li class="chapter-item "><a href="../../posts/rust/implment_builder_proc_macro_for_command_struct.html"><strong aria-hidden="true">7.26.</strong> implment Builder proc-macro for Command struct</a></li></ol></li><li class="chapter-item "><a href="../../posts/java/java.html"><strong aria-hidden="true">8.</strong> Java</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/java/java_grammar.html"><strong aria-hidden="true">8.1.</strong> java grammar and codewar</a></li><li class="chapter-item "><a href="../../posts/java/run_jar.html"><strong aria-hidden="true">8.2.</strong> java run .jar</a></li><li class="chapter-item "><a href="../../posts/java/java_pomxml_add_defaultgoal_to_build.html"><strong aria-hidden="true">8.3.</strong> Java pomxml add defaultGoal to build</a></li><li class="chapter-item "><a href="../../posts/java/java_set_mvn_mirror.html"><strong aria-hidden="true">8.4.</strong> Java set mvn mirror</a></li></ol></li><li class="chapter-item "><a href="../../posts/python/python.html"><strong aria-hidden="true">9.</strong> Python</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/python/convert_pesn.html"><strong aria-hidden="true">9.1.</strong> convert pesn</a></li><li class="chapter-item "><a href="../../posts/python/find_remove_dir.html"><strong aria-hidden="true">9.2.</strong> find and remove dir</a></li><li class="chapter-item "><a href="../../posts/python/timing_message.html"><strong aria-hidden="true">9.3.</strong> wechat send message</a></li><li class="chapter-item "><a href="../../posts/python/use_python_openpyxl_package_read_and_edit_excel_files.html"><strong aria-hidden="true">9.4.</strong> Use python openpyxl package read and edit excel files</a></li><li class="chapter-item "><a 
href="../../posts/python/sanctum_model_yaml.html"><strong aria-hidden="true">9.5.</strong> sanctum model yaml</a></li><li class="chapter-item "><a href="../../posts/python/how_to_detect_objects_on_images_using_the_yolov8_neural_network.html"><strong aria-hidden="true">9.6.</strong> How to detect objects on images using the YOLOv8 neural network</a></li><li class="chapter-item "><a href="../../posts/python/use_huggingface_model.html"><strong aria-hidden="true">9.7.</strong> use huggingface model</a></li></ol></li><li class="chapter-item "><a href="../../posts/go/go.html"><strong aria-hidden="true">10.</strong> Go</a></li><li class="chapter-item "><a href="../../posts/javascript/js.html"><strong aria-hidden="true">11.</strong> Javascript</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/javascript/js_tutorial.html"><strong aria-hidden="true">11.1.</strong> js tutorial</a></li><li class="chapter-item "><a href="../../posts/javascript/js_tutorial_map.html"><strong aria-hidden="true">11.2.</strong> ja map</a></li><li class="chapter-item "><a href="../../posts/javascript/js_tutorial_math.html"><strong aria-hidden="true">11.3.</strong> js math</a></li><li class="chapter-item "><a href="../../posts/javascript/js_tutorial_object.html"><strong aria-hidden="true">11.4.</strong> js object</a></li><li class="chapter-item "><a href="../../posts/javascript/js_tutorial_set.html"><strong aria-hidden="true">11.5.</strong> js set</a></li><li class="chapter-item "><a href="../../posts/javascript/single_thread_and_asynchronous.html"><strong aria-hidden="true">11.6.</strong> single thread and asynchronous</a></li><li class="chapter-item "><a href="../../posts/javascript/this.html"><strong aria-hidden="true">11.7.</strong> js this</a></li><li class="chapter-item "><a href="../../posts/javascript/js_implment_aes.html"><strong aria-hidden="true">11.8.</strong> js implment aes</a></li><li class="chapter-item "><a 
href="../../posts/javascript/getting_started_with_ajax.html"><strong aria-hidden="true">11.9.</strong> getting started with ajax</a></li><li class="chapter-item "><a href="../../posts/javascript/BinarySearchTree.html"><strong aria-hidden="true">11.10.</strong> binary search tree</a></li><li class="chapter-item "><a href="../../posts/javascript/goole_zx.html"><strong aria-hidden="true">11.11.</strong> goole zx</a></li><li class="chapter-item "><a href="../../posts/javascript/es6.html"><strong aria-hidden="true">11.12.</strong> es6</a></li></ol></li><li class="chapter-item "><a href="../../posts/ruby/ruby.html"><strong aria-hidden="true">12.</strong> Ruby</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/ruby/rails_setup_env.html"><strong aria-hidden="true">12.1.</strong> ruby on rails setup environment</a></li><li class="chapter-item "><a href="../../posts/ruby/learn_ruby.html"><strong aria-hidden="true">12.2.</strong> learn ruby</a></li><li class="chapter-item "><a href="../../posts/ruby/ruby_note.html"><strong aria-hidden="true">12.3.</strong> Ruby Note</a></li><li class="chapter-item "><a href="../../posts/ruby/setup_ruby_for_ctf.html"><strong aria-hidden="true">12.4.</strong> Setup ruby for CTF</a></li></ol></li><li class="chapter-item "><a href="../../posts/react/react.html"><strong aria-hidden="true">13.</strong> React</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/react/react_life_cycle.html"><strong aria-hidden="true">13.1.</strong> react life cycle</a></li><li class="chapter-item "><a href="../../posts/react/react_router.html"><strong aria-hidden="true">13.2.</strong> react router</a></li><li class="chapter-item "><a href="../../posts/react/react_this.html"><strong aria-hidden="true">13.3.</strong> react this</a></li><li class="chapter-item "><a href="../../posts/react/react_interviw.html"><strong aria-hidden="true">13.4.</strong> 
react interview</a></li><li class="chapter-item "><a href="../../posts/react/important_react_interview.html"><strong aria-hidden="true">13.5.</strong> important react interview</a></li><li class="chapter-item "><a href="../../posts/react/react_quick_reference.html"><strong aria-hidden="true">13.6.</strong> react quick reference</a></li><li class="chapter-item "><a href="../../posts/react/redux_quick_reference.html"><strong aria-hidden="true">13.7.</strong> redux quick reference</a></li></ol></li><li class="chapter-item "><a href="../../posts/vue/vue.html"><strong aria-hidden="true">14.</strong> Vue</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/vue/vue_ajax.html"><strong aria-hidden="true">14.1.</strong> vue ajax</a></li></ol></li><li class="chapter-item "><a href="../../posts/angular/angular.html"><strong aria-hidden="true">15.</strong> Angular</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/angular/controller_communication.html"><strong aria-hidden="true">15.1.</strong> controller communication</a></li><li class="chapter-item "><a href="../../posts/angular/creating_custom_directives.html"><strong aria-hidden="true">15.2.</strong> creating custom directives</a></li><li class="chapter-item "><a href="../../posts/angular/directive_notes.html"><strong aria-hidden="true">15.3.</strong> directive notes</a></li><li class="chapter-item "><a href="../../posts/angular/directive_communication.html"><strong aria-hidden="true">15.4.</strong> directive communication</a></li><li class="chapter-item "><a href="../../posts/angular/post_params.html"><strong aria-hidden="true">15.5.</strong> post params</a></li><li class="chapter-item "><a href="../../posts/angular/read_json_angular.html"><strong aria-hidden="true">15.6.</strong> read json angular</a></li><li class="chapter-item "><a href="../../posts/angular/same_route_reload.html"><strong 
aria-hidden="true">15.7.</strong> same route reload</a></li></ol></li><li class="chapter-item "><a href="../../posts/css/css.html"><strong aria-hidden="true">16.</strong> Css</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/css/use_css_media.html"><strong aria-hidden="true">16.1.</strong> use css media</a></li></ol></li><li class="chapter-item "><a href="../../posts/php/php.html"><strong aria-hidden="true">17.</strong> Php</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/php/for_php_string_implment_some_extemtion_functions.html"><strong aria-hidden="true">17.1.</strong> for php string implment some extemtion functions</a></li><li class="chapter-item "><a href="../../posts/php/php_cheatsheet.html"><strong aria-hidden="true">17.2.</strong> PHP cheatsheet</a></li></ol></li><li class="chapter-item "><a href="../../posts/windows/windows.html"><strong aria-hidden="true">18.</strong> Windows</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/windows/windows.html"><strong aria-hidden="true">18.1.</strong> Windows</a></li><li class="chapter-item "><a href="../../posts/windows/windows10_use_powershell_dedup_redundent_path.html"><strong aria-hidden="true">18.2.</strong> Windows10 use PowerShell dedup redundent PATH</a></li></ol></li><li class="chapter-item "><a href="../../posts/leetcode/leetcode.html"><strong aria-hidden="true">19.</strong> Leetcode</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/leetcode/rust_leetcode.html"><strong aria-hidden="true">19.1.</strong> rust leetcode</a></li><li class="chapter-item "><a href="../../posts/leetcode/rust_codewar.html"><strong aria-hidden="true">19.2.</strong> rust codewar</a></li><li class="chapter-item "><a href="../../posts/leetcode/swift_codewar.html"><strong 
aria-hidden="true">19.3.</strong> swift codewar</a></li><li class="chapter-item "><a href="../../posts/leetcode/js_leetcode.html"><strong aria-hidden="true">19.4.</strong> js leetcode</a></li><li class="chapter-item "><a href="../../posts/leetcode/java_leetcode.html"><strong aria-hidden="true">19.5.</strong> java leetcode</a></li><li class="chapter-item "><a href="../../posts/leetcode/rust_huawei.html"><strong aria-hidden="true">19.6.</strong> huawei test</a></li><li class="chapter-item "><a href="../../posts/leetcode/rust_utils.html"><strong aria-hidden="true">19.7.</strong> rust common functions</a></li><li class="chapter-item "><a href="../../posts/leetcode/olympiad_training.html"><strong aria-hidden="true">19.8.</strong> Computer olympiad training</a></li></ol></li><li class="chapter-item "><a href="../../posts/ctf/CTF.html"><strong aria-hidden="true">20.</strong> CTF</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/ctf/CTF_Note.html"><strong aria-hidden="true">20.1.</strong> CTF Note</a></li><li class="chapter-item "><a href="../../posts/ctf/0.1_Web.html"><strong aria-hidden="true">20.2.</strong> Web</a></li><li class="chapter-item "><a href="../../posts/ctf/4.1_Misc.html"><strong aria-hidden="true">20.3.</strong> Misc</a></li><li class="chapter-item "><a href="../../posts/ctf/3.2_PWN_note.html"><strong aria-hidden="true">20.4.</strong> PWN</a></li><li class="chapter-item "><a href="../../posts/ctf/3.1_Crypto.html"><strong aria-hidden="true">20.5.</strong> Crypto</a></li><li class="chapter-item "><a href="../../posts/ctf/3.4_RSA_note.html"><strong aria-hidden="true">20.6.</strong> Rsa attack</a></li><li class="chapter-item "><a href="../../posts/ctf/3.5_Base64.html"><strong aria-hidden="true">20.7.</strong> Base64</a></li><li class="chapter-item "><a href="../../posts/ctf/0.0_SQL Injection Cheatsheet.html"><strong aria-hidden="true">20.8.</strong> SQL Injection Cheatsheet</a></li><li 
class="chapter-item "><a href="../../posts/ctf/1.1_SQL_injection.html"><strong aria-hidden="true">20.9.</strong> SQL Injection</a></li><li class="chapter-item "><a href="../../posts/ctf/1.2_SQL_injection_UNION_attacks.html"><strong aria-hidden="true">20.10.</strong> SQL Injection UNION attacks</a></li><li class="chapter-item "><a href="../../posts/ctf/1.3_Blind SQL injection.html"><strong aria-hidden="true">20.11.</strong> Blind SQL Injection</a></li><li class="chapter-item "><a href="../../posts/ctf/1.4_Code Injection.html"><strong aria-hidden="true">20.12.</strong> Code Injection</a></li><li class="chapter-item "><a href="../../posts/ctf/1.5_SSRF.html"><strong aria-hidden="true">20.13.</strong> SSRF</a></li><li class="chapter-item "><a href="../../posts/ctf/1.6_OS command injection.html"><strong aria-hidden="true">20.14.</strong> OS command injection</a></li><li class="chapter-item "><a href="../../posts/ctf/1.7_Local file inclusion.html"><strong aria-hidden="true">20.15.</strong> Local file inclusion</a></li><li class="chapter-item "><a href="../../posts/ctf/1.8_Remote file inclusion.html"><strong aria-hidden="true">20.16.</strong> Remote file inclusion</a></li><li class="chapter-item "><a href="../../posts/ctf/1.9_CSRFm.html"><strong aria-hidden="true">20.17.</strong> CSRF</a></li><li class="chapter-item "><a href="../../posts/ctf/1.10_NoSQL injection.html"><strong aria-hidden="true">20.18.</strong> NoSQL injection</a></li><li class="chapter-item "><a href="../../posts/ctf/1.11_JSON injection.html"><strong aria-hidden="true">20.19.</strong> JSON injection</a></li><li class="chapter-item "><a href="../../posts/ctf/1.12_CTF_Web_SQL_Note.html"><strong aria-hidden="true">20.20.</strong> CTF Web SQL Note</a></li><li class="chapter-item "><a href="../../posts/ctf/2.1_XXE.html"><strong aria-hidden="true">20.21.</strong> XXE</a></li><li class="chapter-item "><a href="../../posts/ctf/2.2_XSS.html"><strong aria-hidden="true">20.22.</strong> XSS</a></li><li 
class="chapter-item "><a href="../../posts/ctf/2.3_Upload File.html"><strong aria-hidden="true">20.23.</strong> Upload File</a></li><li class="chapter-item "><a href="../../posts/ctf/2.4_serialize_unserialize.html"><strong aria-hidden="true">20.24.</strong> serialize unserialize</a></li><li class="chapter-item "><a href="../../posts/ctf/2.5_Race condition.html"><strong aria-hidden="true">20.25.</strong> Race condition</a></li><li class="chapter-item "><a href="../../posts/ctf/zip_plain_text_attack.html"><strong aria-hidden="true">20.26.</strong> Zip plain text attack</a></li><li class="chapter-item "><a href="../../posts/ctf/3.3_pwn HCTF2016 brop.html"><strong aria-hidden="true">20.27.</strong> pwn HCTF2016 brop</a></li><li class="chapter-item "><a href="../../posts/ctf/pwn_patch_defense_skill.html"><strong aria-hidden="true">20.28.</strong> PWN Patch defense skill</a></li><li class="chapter-item "><a href="../../posts/ctf/pwn_stack_overflow.html"><strong aria-hidden="true">20.29.</strong> PWN stack overflow</a></li><li class="chapter-item "><a href="../../posts/ctf/pwn_heap_overflow.html"><strong aria-hidden="true">20.30.</strong> PWN heap overflow</a></li><li class="chapter-item "><a href="../../posts/ctf/pwn_format_string_vulnerability.html"><strong aria-hidden="true">20.31.</strong> PWN Format String Vulnerability</a></li><li class="chapter-item "><a href="../../posts/ctf/kali_linux_tutorials.html"><strong aria-hidden="true">20.32.</strong> Kali linux tutorials</a></li><li class="chapter-item "><a href="../../posts/ctf/google_dorks_2023_lists.html"><strong aria-hidden="true">20.33.</strong> Google Dorks 2023 Lists</a></li><li class="chapter-item "><a href="../../posts/ctf/dvwa_writeup.html"><strong aria-hidden="true">20.34.</strong> DVWA WriteUp</a></li><li class="chapter-item "><a href="../../posts/ctf/bwapp_writeup.html"><strong aria-hidden="true">20.35.</strong> bWAPP WriteUp</a></li><li class="chapter-item "><a 
href="../../posts/ctf/sqlilabs_writeup.html"><strong aria-hidden="true">20.36.</strong> sqlilabs WriteUp</a></li><li class="chapter-item "><a href="../../posts/ctf/pwnable_kr_challenge.html"><strong aria-hidden="true">20.37.</strong> Solutions for pwnable.kr</a></li><li class="chapter-item "><a href="../../posts/ctf/the_periodic_table.html"><strong aria-hidden="true">20.38.</strong> The Periodic Table</a></li><li class="chapter-item "><a href="../../posts/ctf/pwntools_cheatsheet.html"><strong aria-hidden="true">20.39.</strong> Pwntools Cheatsheet</a></li><li class="chapter-item "><a href="../../posts/ctf/gdb_cheatsheet.html"><strong aria-hidden="true">20.40.</strong> GDB Cheatsheet</a></li></ol></li><li class="chapter-item "><a href="../../posts/iltes/iltes.html"><strong aria-hidden="true">21.</strong> ILTES</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/iltes/iltes_writing.html"><strong aria-hidden="true">21.1.</strong> ILTES Writing</a></li></ol></li></ol>
            </div>
            <div id="sidebar-resize-handle" class="sidebar-resize-handle"></div>
        </nav>

        <!-- Track and set sidebar scroll position -->
        <script>
            // Persist the sidebar scroll offset across in-sidebar navigation,
            // and otherwise center the active chapter in view.
            var sidebarScrollbox = document.querySelector('#sidebar .sidebar-scrollbox');
            sidebarScrollbox.addEventListener('click', function (event) {
                // Only link clicks navigate away, so only they need the offset saved.
                if (event.target.tagName === 'A') {
                    sessionStorage.setItem('sidebar-scroll', sidebarScrollbox.scrollTop);
                }
            }, { passive: true });
            var savedScroll = sessionStorage.getItem('sidebar-scroll');
            sessionStorage.removeItem('sidebar-scroll');
            if (savedScroll) {
                // Arrived via a sidebar link: restore the previous position.
                sidebarScrollbox.scrollTop = savedScroll;
            } else {
                // Arrived via "next/previous chapter" buttons: reveal the
                // currently active section instead.
                var activeSection = document.querySelector('#sidebar .active');
                if (activeSection) {
                    activeSection.scrollIntoView({ block: 'center' });
                }
            }
        </script>

        <div id="page-wrapper" class="page-wrapper">

            <div class="page">
                                <div id="menu-bar-hover-placeholder"></div>
                <div id="menu-bar" class="menu-bar sticky">
                    <div class="left-buttons">
                        <button id="sidebar-toggle" class="icon-button" type="button" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
                            <i class="fa fa-bars"></i>
                        </button>
                        <button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
                            <i class="fa fa-paint-brush"></i>
                        </button>
                        <ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
                            <li role="none"><button role="menuitem" class="theme" id="light">Light</button></li>
                            <li role="none"><button role="menuitem" class="theme" id="rust">Rust</button></li>
                            <li role="none"><button role="menuitem" class="theme" id="coal">Coal</button></li>
                            <li role="none"><button role="menuitem" class="theme" id="navy">Navy</button></li>
                            <li role="none"><button role="menuitem" class="theme" id="ayu">Ayu</button></li>
                        </ul>
                        <button id="search-toggle" class="icon-button" type="button" title="Search. (Shortkey: s)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="S" aria-controls="searchbar">
                            <i class="fa fa-search"></i>
                        </button>
                    </div>

                    <h1 class="menu-title">Andrew&#x27;s Blog</h1>

                    <div class="right-buttons">
                        <a href="https://gitee.com/dnrops/dnrops" title="Git repository" aria-label="Git repository">
                            <i id="git-repository-button" class="fa fa-github"></i>
                        </a>

                    </div>
                </div>

                <div id="search-wrapper" class="hidden">
                    <form id="searchbar-outer" class="searchbar-outer">
                        <input type="search" id="searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
                    </form>
                    <div id="searchresults-outer" class="searchresults-outer hidden">
                        <div id="searchresults-header" class="searchresults-header"></div>
                        <ul id="searchresults">
                        </ul>
                    </div>
                </div>

                <!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
                <script>
                    // Sync ARIA state with the sidebar visibility flag.
                    // `sidebar` is a global set by the inline <head> script from
                    // localStorage ('mdbook-sidebar'); 'visible' means the
                    // sidebar is currently shown.
                    document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
                    document.getElementById('sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
                    // Remove sidebar links from the tab order while the sidebar
                    // is hidden, so keyboard users do not tab through invisible
                    // navigation entries.
                    Array.from(document.querySelectorAll('#sidebar a')).forEach(function(link) {
                        link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
                    });
                </script>

                <div id="content" class="content">
                    <main>
                        <h1 id="how-to-create-yolov8-based-object-detection-web-service-using-python-julia-nodejs-javascript-go-and-rust"><a class="header" href="#how-to-create-yolov8-based-object-detection-web-service-using-python-julia-nodejs-javascript-go-and-rust">How to create YOLOv8-based object detection web service using Python, Julia, Node.js, JavaScript, Go and Rust</a></h1>
<p style="display:flex;
    align-items: center;
    justify-content: end;
">Pub Date: 2023-12-08</p>
<h2 id="introduction"><a class="header" href="#introduction">Introduction</a></h2>
<p>This is the second part of my article about the YOLOv8 neural network. In the <a href="https://dev.to/andreygermanov/a-practical-introduction-to-object-detection-with-yolov8-neural-network-3n8c">previous article</a> I provided a practical introduction to this model and its common API. Then I showed how to create a web service that detects objects on images using Python and official YOLOv8 library based on PyTorch.</p>
<p>In this article, I am going to show how to work with the YOLOv8 model in low level, without the PyTorch and the official API. It will open a lot of new opportunities for deployment. Using concepts and examples of this post you will be able to create an AI powered object detection services that use ten time less resources, and you will be able to create these services not only on Python, but on most of the other programming languages. In particular, I will show how to create the YOLOv8 powered web service on Julia, Node.js, JavaScript, Go and Rust.</p>
<p>As a base, we will use the web service, developed in the previous article, which is available in <a href="https://github.com/AndreyGermanov/yolov8_pytorch_python">this repository</a>. We will just rewrite the backend of this web service on different languages. That is why it’s required to read the <a href="https://dev.to/andreygermanov/a-practical-introduction-to-object-detection-with-yolov8-neural-network-3n8c">first article</a> before continue reading this.</p>
<h1 id="yolov8-deployment-options"><a class="header" href="#yolov8-deployment-options">YOLOv8 deployment options</a></h1>
<p>The YOLOv8 neural network, initially created using the PyTorch framework and exported as a set of “.pt” files. We used the Ultralytics API to train these models or make predictions based on them. To run them, it’s required to have an environment with Python and PyTorch.</p>
<p>PyTorch is a great framework to design, train and evaluate neural network models. In addition, it has tools to prepare or even generate the datasets to train the models and many other great utils. However, we do not need all this in production. If we talk about YOLOv8, then all that you need in production is to run the model with input image and receive resulting bounding boxes. However, the YOLOv8 implemented on Python. Does it mean that all programmers who want to use this great object detector must become the Python programmers? Does it mean that they must rewrite their applications on Python or integrate them with Python code? Fortunately not. The Ultralytics API has a great <code>export</code> function to convert any YOLOv8 model to a format, that can be used by external applications.</p>
<p>The following formats are supported at the moment:</p>
<div class="table-wrapper"><table><thead><tr><th>Format</th><th><code>format</code> Argument</th></tr></thead><tbody>
<tr><td><a href="https://pytorch.org/docs/stable/jit.html">TorchScript</a></td><td><code>torchscript</code></td></tr>
<tr><td><a href="https://onnx.ai/">ONNX</a></td><td><code>onnx</code></td></tr>
<tr><td><a href="https://docs.openvino.ai/latest/index.html">OpenVINO</a></td><td><code>openvino</code></td></tr>
<tr><td><a href="https://developer.nvidia.com/tensorrt">TensorRT</a></td><td><code>engine</code></td></tr>
<tr><td><a href="https://github.com/apple/coremltools">CoreML</a></td><td><code>coreml</code></td></tr>
<tr><td><a href="https://www.tensorflow.org/guide/saved_model">TF SavedModel</a></td><td><code>saved_model</code></td></tr>
<tr><td><a href="https://www.tensorflow.org/api_docs/python/tf/Graph">TF GraphDef</a></td><td><code>pb</code></td></tr>
<tr><td><a href="https://www.tensorflow.org/lite">TF Lite</a></td><td><code>tflite</code></td></tr>
<tr><td><a href="https://coral.ai/docs/edgetpu/models-intro/">TF Edge TPU</a></td><td><code>edgetpu</code></td></tr>
<tr><td><a href="https://www.tensorflow.org/js">TF.js</a></td><td><code>tfjs</code></td></tr>
<tr><td><a href="https://github.com/PaddlePaddle">PaddlePaddle</a></td><td><code>paddle</code></td></tr>
</tbody></table>
</div>
<p>For example, the <code>CoreML</code> is a neural network format, that can be used in iOS applications that run on iPhone.</p>
<p>Using the links in this table, you can read an overview of each of these formats.</p>
<p>The most interesting of them for us today is ONNX which is a lightweight runtime, created by Microsoft, that can be used to run neural network models on a wide range of platforms and programming languages. This is not a framework, but it’s just a shared library written in C. It’s just 16 MB in size for Linux, but it has interface bindings for most programming languages, including Python, PHP, JavaScript, Node.js, C++, Go and Rust. It has a simple API and if you wrote an ONNX code to run a model on one programming language, then it will not be difficult to rewrite it and use on other, which we will see today.</p>
<p>To follow the sections started from this one, you need to have <a href="https://python.org">Python</a> and <a href="https://jupyter.org">Jupyter Notebooks</a> installed.</p>
<p><a href=""></a></p>
<h1 id="export-yolov8-model-to-onnx"><a class="header" href="#export-yolov8-model-to-onnx">Export YOLOv8 model to ONNX</a></h1>
<p>First, let’s load the YOLOv8 model and export it to the ONNX format to make it usable. Run the Jupyter notebook and execute the following code in it.</p>
<pre><code>from ultralytics import YOLO
model = YOLO(&quot;yolov8m.pt&quot;)
model.export(format=&quot;onnx&quot;)
</code></pre>
<p>In the code above, you loaded the middle-sized YOLOv8 model for object detection and exported it to the ONNX format. This model is pretrained on COCO dataset and can detect 80 object classes.</p>
<p>After running this code, you should see the exported model in a file with the same name and the <code>.onnx</code> extension. In this case, you will see the <code>yolov8m.onnx</code> file in a folder where you run this code.</p>
<p>Before writing a web service based on ONNX, let’s discover how this library works in Jupyter Notebook to understand the main concepts.</p>
<p><a href=""></a></p>
<h1 id="explore-object-detection-on-image-using-onnx"><a class="header" href="#explore-object-detection-on-image-using-onnx">Explore object detection on image using ONNX</a></h1>
<p>Now when you have a model, let’s use ONNX to work with it. For simplicity, we will start with Python, because we already have a Python web application, that uses PyTorch and Ultralytics APIs. So, it will be easier to move it to ONNX.</p>
<p>Install the ONNX runtime library for Python by running the following command in your Jupyter notebook:</p>
<pre><code>!pip install onnxruntime
</code></pre>
<p>and import it:</p>
<pre><code>import onnxruntime as ort
</code></pre>
<p>We set the <code>ort</code> alias to it. Remember this abbreviation because in other programming languages you will often see <code>ort</code> instead of ONNX runtime.</p>
<p>The <code>ort</code> module is a root of the ONNX API. The main object of this API is the <code>InferenceSession</code> which used to instantiate a model to run prediction on it. Model instantiation works very similar to what we did before with Ultralytics:</p>
<pre><code>model = ort.InferenceSession(&quot;yolov8m.onnx&quot;, providers=['CPUExecutionProvider'])
</code></pre>
<p>Here we loaded the model, but from the “.onnx” file instead of “.pt”. And now it’s ready to run.</p>
<p>And this is a moment when similarities between Ultralytics and ONNX end. If you remember, with Ultralytics you just run: <code>outputs = model.predict(&quot;image_file&quot;)</code> and received result. The smart predict method did the following for you automatically:</p>
<ol>
<li>Read the image from file</li>
<li>Convert it to the format of the YOLOv8 neural network input layer</li>
<li>Pass it through the model</li>
<li>Receive the raw model output</li>
<li>Parse the raw model output</li>
<li>Return structured information about detected objects and their bounding boxes</li>
</ol>
<p>The ONNX session object has a similar method <code>run</code>, but it implements only steps 3 and 4. Everything else is up to you, because ONNX does not know that this is the YOLOv8 model. It does not know which input this neural network expects to get and what the raw output of this model means. This is universal API for any kind of neural networks, it does not know about concrete use cases like object detection on images.</p>
<p>In terms of ONNX, the neural network is a black box that receives a multidimensional array of float numbers as an input and transforms it into another multidimensional array of numbers. It does not know which numbers should be in the input, or what the numbers in the output mean. So, what can we do with it?</p>
<p><img src="https://gitcode.net/dnrops/blog_images/-/raw/main/all_imgs/7621f97c19ce4daeae855b36246f0b8c~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.png#??w=782&amp;h=203&amp;s=22434&amp;e=png&amp;a=1&amp;b=fdfbfb" alt="Image description" /></p>
<p>Fortunately, things are not so bad, and there is something we can investigate. The shapes of the input and output layers of a neural network are fixed: they are defined when the neural network is created, and information about them is stored in the model.</p>
<p>The ONNX session object has a helpful method <code>get_inputs()</code> to get the information about inputs that this model expects to receive and the <code>get_outputs()</code> to get the information about the outputs, that the model returns after processing the inputs.</p>
<p>Let’s get the inputs first:</p>
<pre><code>inputs = model.get_inputs();
len(inputs)
</code></pre>
<pre><code>1
</code></pre>
<p>Here we got the array of inputs and displayed the length of this array. The result is obvious: the network expects to get a single input. Let’s get it:</p>
<pre><code>input = inputs[0]
</code></pre>
<p>The input object has three fields: <code>name</code>, <code>type</code> and <code>shape</code>. Let’s get these values for our YOLOv8 model:</p>
<pre><code>print(&quot;Name:&quot;,input.name)
print(&quot;Type:&quot;,input.type)
print(&quot;Shape:&quot;,input.shape)
</code></pre>
<p>And this is the output that you will get:</p>
<pre><code>Name: images
Type: tensor(float)
Shape: [1, 3, 640, 640]
</code></pre>
<p>This is what we can discover from this:</p>
<ul>
<li>The name of expected input is <code>images</code> which is obvious. The YOLOv8 model receives the images as an input</li>
<li>The type of input is <code>tensor of float numbers</code>. The <a href="https://en.wikipedia.org/wiki/Tensor">tensor</a> can have many definitions, but from practical point of view which is important for us now, this is a multidimensional array of numbers, the array of float numbers. So, we can deduce that we need to convert our image to a multidimensional array of float numbers.</li>
<li>The shape shows the dimensions of this tensor. Here, you see that this array should be four dimensional. This should be a single image (1), that contains 3 matrices of 640x640 float numbers. What numbers should be in these matrices? The matrix of color components. As you should know, each color pixel has Red, Green and Blue components. Each color component can have values from 0 to 255. Also, you can deduce that the image must have 640x640 size. Finally, there should be 3 matrices: one 640x640 matrix that contain red component of each pixel, one for green and one for blue.</li>
</ul>
<p>Now you have enough observations to understand what need to do in the code to prepare the input data.</p>
<p><img src="https://gitcode.net/dnrops/blog_images/-/raw/main/all_imgs/261aed98724e4ad9bd5bb07d1acc6701~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.png#??w=782&amp;h=203&amp;s=21823&amp;e=png&amp;a=1&amp;b=ffffff" alt="Image description" /></p>
<p><a href=""></a></p>
<h2 id="prepare-the-input"><a class="header" href="#prepare-the-input">Prepare the input</a></h2>
<p>We need to load an image, resize it to 640x640, extract information about Red, Green and Blue component of each pixel and construct 3 matrices of intensities of appropriate colors.</p>
<p>Let’s just do it using the Pillow python package, that we already used before. Ensure that it’s installed:</p>
<pre><code>!pip install pillow
</code></pre>
<p>For example, we will use the <code>cat_dog.jpg</code> image, that we used in the previous article:</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--Hj3v1AXE--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/h7ndyaueoc8elyqtq69u.jpg"><img src="https://gitcode.net/dnrops/blog_images/-/raw/main/all_imgs/39a91b4c790147e6b3a275bdcf7bb877~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.png#??w=612&amp;h=415&amp;s=34608&amp;e=jpg&amp;b=637c1e" alt="Image description" /></a></p>
<p>Let’s load and resize it:</p>
<pre><code>from PIL import Image

img = Image.open(&quot;cat_dog.jpg&quot;)
img_width, img_height = img.size;
img = img.resize((640,640))
</code></pre>
<p>First, you loaded the Image object from the Pillow library. Then you created the <code>img</code> object from the <code>cat_dog.jpg</code> file. Then we saved the original size of the image to the <code>img_width</code> and <code>img_height</code> variables, that will be needed later. Finally, we resized it, providing the new size as a (640,640) tuple.</p>
<p>Now we need to extract each color component of each pixel and construct 3 matrices from them. But here we have one thing that can lead to inconsistencies in the future. Each pixel has four color channels: Red, Green, Blue and Alpha. The alpha channel describes the transparency of a pixel. We do not need Alpha channel in the image for YOLOv8 predictions. Let’s remove it:</p>
<pre><code>img = img.convert(&quot;RGB&quot;);
</code></pre>
<p>By default, the image with Alpha channel has “RGBA” color model. By this line, you converted it to “RGB”. This way, you’ve removed the alpha channel.</p>
<p>Now it’s time to create 3 matrices of color channel values. We can do this manually, but Python has a great interoperability between libraries. The NumPy library, that usually used to work with multidimensional arrays, can just load the Pillow image object as an array as simple as this:</p>
<pre><code>import numpy as np

input = np.array(img)
</code></pre>
<p>Here, you imported NumPy and just loaded the image to the <code>input</code> NumPy array. Let’s see the shape of this array now:</p>
<pre><code>input.shape
</code></pre>
<pre><code>(640, 640, 3)
</code></pre>
<p>Almost fine, but the dimensions go in wrong order. We need to put <code>3</code> in the beginning. The <code>transpose</code> function can switch dimensions of NumPy array:</p>
<pre><code>input = input.transpose(2,0,1)
input.shape
</code></pre>
<pre><code>(3,640,640)
</code></pre>
<p>The numbering of dimensions starts from 0. So, we had 0=640, 1=640, 2=3. Then, using the <code>transpose</code> function, we moved the dimension number <code>2</code> to the first place. Finally, received the shape (3,640,640).</p>
<p>But we need to add one more dimension to the beginning to make it (1,3,640,640). The reshape function can do this:</p>
<pre><code>input = input.reshape(1,3,640,640)
</code></pre>
<p>Now we have correct input shape, but if you try to see contents of this array, like for example, the red component of the first pixel:</p>
<pre><code>input[0,0,0,0]
</code></pre>
<p>you’ll probably see the integer:</p>
<pre><code>71
</code></pre>
<p>but the float numbers required. Moreover, as a rule, the numbers for machine learning must be <strong>scaled</strong>, e.g. scaled to a range from 0 to 1. Having a knowledge, that the color value can be in a range from 0 to 255, we can scale all pixels to a 0-1 range if divide them by <code>255.0</code>. The NumPy allows doing this in a single line of code:</p>
<pre><code>input = input/255.0

input[0,0,0,0]
</code></pre>
<pre><code>0.2784313725490196
</code></pre>
<p>In the code above, you divided all numbers in array and displayed the first of them: the red color component intensity for the first pixel. So, this is how the input data should look.</p>
<p><a href=""></a></p>
<h2 id="run-the-model"><a class="header" href="#run-the-model">Run the model</a></h2>
<p>Now, before running the prediction process, let’s see, which output the YOLOv8 model should return. As said above, this can be done using the <code>get_outputs()</code> method of ONNX session object. The result value of this method has the same type as the value of the <code>get_inputs()</code>, because as I said before: “the only work of neural network is to transform one array of numbers provided as an input to other array of numbers”. So, let’s see the form of the output of pretrained YOLOv8 model:</p>
<pre><code>outputs = model.get_outputs()
output = outputs[0]
print(&quot;Name:&quot;,output.name)
print(&quot;Type:&quot;,output.type)
print(&quot;Shape:&quot;,output.shape)
</code></pre>
<pre><code>Name: output0
Type: tensor(float)
Shape: [1, 84, 8400]
</code></pre>
<p>The ONNX is a universal platform to run neural networks of any kind. That is why it assumes, that the network can have many inputs and many outputs, and it accepts array of inputs and array of outputs, even if these arrays have only single item. YOLOv8 has a single output, which is a first item of the <code>outputs</code> object.</p>
<p>Here you see that the output has an <code>output0</code> name, it also has a form of tensor of float numbers and a shape of this output is [1,84,8400] which means that this is a single 84x8400 matrix, nested in a single array. In practice, it means that the YOLOv8 network returns 8400 bounding boxes and each bounding box has 84 parameters. It’s a little bit ugly that each bounding box is a column here, not a row. It’s a technical requirement of the neural network algorithm. I think it would be better to transpose it to 8400x84, so, it will be clear that there are 8400 rows that match detected objects and that each row is a bounding box with 84 parameters.</p>
<p>We will discuss why there are so many parameters for a single bounding box later. First, we should run the model to get the data for this output. We have everything for this now.</p>
<p>To run prediction for YOLOv8 model, we need to execute the <code>run</code> method, which has the following signature:</p>
<pre><code>model.run(output_names,inputs)
</code></pre>
<ul>
<li><code>output_names</code> - the array of names of outputs that you want to receive. In YOLOv8 model, it will be an array with a single item.</li>
<li><code>inputs</code> - the dictionary of inputs, that you pass to the network in a format {name:tensor} where <code>name</code> is a name of input and the <code>tensor</code> is an image data array that we prepared before.</li>
</ul>
<p>To run the prediction for the data that you prepared, you can run the following:</p>
<pre><code>outputs = model.run([&quot;output0&quot;], {&quot;images&quot;:input})
len(outputs)
</code></pre>
<pre><code>1
</code></pre>
<p>As you seen earlier, the only output of this model has a name <code>output0</code> and the name of the only input is <code>images</code>. The data tensor for the input you prepared in the <code>input</code> variable.</p>
<p>If everything went well, it will display that the length of received <code>outputs</code> array is <code>1</code> which means that you have only single output. However, if you receive the error that says that the input must be in <code>float</code> format, then convert it to <code>float32</code> using the following line:</p>
<pre><code>input = input.astype(np.float32)
</code></pre>
<p>and then run again.</p>
<p>Then we are close to the most interesting part of the work: process the output.</p>
<p><a href=""></a></p>
<h1 id="process-the-output"><a class="header" href="#process-the-output">Process the output</a></h1>
<p>There is an only single output, so we can extract it from outputs:</p>
<pre><code>output = outputs[0]
output.shape
</code></pre>
<pre><code>(1, 84, 8400)
</code></pre>
<p>So, as you see, it returned the output of correct shape. As the first dimension has only single item, we can just get it:</p>
<pre><code>output = output[0]
output.shape
</code></pre>
<pre><code>(84, 8400)
</code></pre>
<p>We turned it into a matrix with 84 rows and 8400 columns. As I said before, it has a transposed form which is not very convenient to work with, so let’s transpose it again:</p>
<pre><code>output = output.transpose()
</code></pre>
<pre><code>(8400, 84)
</code></pre>
<p>Now it’s more clear: 8400 rows with 84 parameters. 8400 is a maximum number of bounding boxes that the YOLOv8 model can detect, and it returns 8400 lines for any image regardless of how many objects really detected on it, because the output of the neural network is fixed and defined during the neural network design. It can’t be variable. So, it returns 8400 rows every time, but the most of these rows contain just garbage. How to detect, which of these rows have meaningful data and which of them are garbage? To do that, we need to discover 84 parameters that each of these row has.</p>
<p>The first 4 elements are coordinates of the bounding box, and all others are the probabilities of all object classes that this model can detect. The pretrained model that you use in this tutorial can detect 80 object classes, that is why, each bounding box has 84 parameters: 4+80. If you use another model, that, for example, trained to detect 3 object classes, then it will have 7 parameters in a row because of 4+3.</p>
<p>Let’s for example display the row number 0:</p>
<pre><code>row = output[0]
print(row)
</code></pre>
<pre><code>[     5.1182      8.9662      13.247      19.459  2.5034e-06  2.0862e-07  5.6624e-07  1.1921e-07  2.0862e-07  1.1921e-07  1.7881e-07  1.4901e-07  1.1921e-07  2.6822e-07  1.7881e-07  1.1921e-07  1.7881e-07  4.1723e-07  5.6624e-07  2.0862e-07  1.7881e-07  2.3842e-07  3.8743e-07  3.2783e-07  1.4901e-07  8.9407e-08
  3.8743e-07  2.9802e-07  2.6822e-07  2.6822e-07  2.3842e-07  2.0862e-07  5.9605e-08  2.0862e-07  1.4901e-07  1.1921e-07  4.7684e-07  2.6822e-07  1.7881e-07  1.1921e-07  8.9407e-08  1.4901e-07  1.7881e-07  2.6822e-07  8.9407e-08  2.6822e-07  3.8743e-07  1.4901e-07  2.0862e-07  4.1723e-07  1.9372e-06  6.5565e-07
  2.6822e-07  5.3644e-07  1.2815e-06  3.5763e-07  2.0862e-07  2.3842e-07  4.1723e-07  2.6822e-07  8.3447e-07  8.9407e-08  4.1723e-07  1.4901e-07  3.5763e-07  2.0862e-07  1.1921e-07  5.9605e-08  5.9605e-08  1.1921e-07  1.4901e-07  1.4901e-07  1.7881e-07  5.9605e-08  8.9407e-08  2.3842e-07  1.4901e-07  2.0862e-07
  2.9802e-07  1.7881e-07  1.1921e-07  2.3842e-07  1.1921e-07  1.1921e-07]
</code></pre>
<p>Here you see that this row represents a bounding box with coordinates [5.1182, 8.9662, 13.247, 19.459]. These values are coordinates of a center of this bounding box, the width and the height:</p>
<p>x_center = 5.1182<br />
y_center = 8.9662<br />
width = 13.247<br />
height = 19.459</p>
<p>Let’s slice out these variables from the row:</p>
<pre><code>xc,yc,w,h = row[:4]
</code></pre>
<p>All other values are the probabilities that the detected object belongs to each of 80 classes. So, assuming that the array numbering starts from 0, the item number 4 contains the probability that the object belongs to class 0 (2.5034e-06), item number 5 contains the probability that the object belongs to class 1 (2.0862e-07) etc.</p>
<p>Now lets remove all garbage and parse this row to a format, that we got in the <a href="https://dev.to/andreygermanov/a-practical-introduction-to-object-detection-with-yolov8-neural-network-3n8c#get_started">previous article</a>: [x1,y1,x2,y2,class_label,probability].</p>
<p>To calculate coordinates of bounding box corners you can use the following formulas:</p>
<pre><code>x1 = xc-w/2
y1 = yc-h/2
x2 = xc+w/2
y2 = yc+h/2
</code></pre>
<p>but there is a very important reminder: do you remember that we scaled the image to 640x640 in the beginning? It means that these coordinates returned in assumption that the image has this size. To get coordinates of this bounding box for the original image, we need to scale them in proportion to the dimensions of the original image. We saved the original width and height to the <code>img_width</code> and <code>img_height</code> variables, and to scale the corners of the bounding box, we need to modify the formulas:</p>
<pre><code>x1 = (xc - w/2) / 640 * img_width
y1 = (yc - h/2) / 640 * img_height
x2 = (xc + w/2) / 640 * img_width
y2 = (yc + h/2) / 640 * img_height
</code></pre>
<p>Then you need to find the object with a maximum probability. On the one hand you can do this in a loop, iterating from 4 to 84 items of this array and select the item index with maximum probability value, but the NumPy has the convenient methods for this:</p>
<pre><code>prob = row[4:].max()
class_id = row[4:].argmax()

print(prob, class_id)
</code></pre>
<pre><code>2.503395e-06 0
</code></pre>
<p>The first line returns the maximum value of subarray from 4 until the end of the row. The second line returns the index of the element with this maximum value. So, here you see that the first probability has a maximum value, and it means that this bounding box belongs to class 0.</p>
<p>To replace class ID with class label, you should have an array of classes, that the model can predict. In case of this model, this is 80 classes from the COCO dataset. Here they are:</p>
<pre><code>yolo_classes = [
    &quot;person&quot;, &quot;bicycle&quot;, &quot;car&quot;, &quot;motorcycle&quot;, &quot;airplane&quot;, &quot;bus&quot;, &quot;train&quot;, &quot;truck&quot;, &quot;boat&quot;,
    &quot;traffic light&quot;, &quot;fire hydrant&quot;, &quot;stop sign&quot;, &quot;parking meter&quot;, &quot;bench&quot;, &quot;bird&quot;, &quot;cat&quot;, &quot;dog&quot;, &quot;horse&quot;,
    &quot;sheep&quot;, &quot;cow&quot;, &quot;elephant&quot;, &quot;bear&quot;, &quot;zebra&quot;, &quot;giraffe&quot;, &quot;backpack&quot;, &quot;umbrella&quot;, &quot;handbag&quot;, &quot;tie&quot;,
    &quot;suitcase&quot;, &quot;frisbee&quot;, &quot;skis&quot;, &quot;snowboard&quot;, &quot;sports ball&quot;, &quot;kite&quot;, &quot;baseball bat&quot;, &quot;baseball glove&quot;,
    &quot;skateboard&quot;, &quot;surfboard&quot;, &quot;tennis racket&quot;, &quot;bottle&quot;, &quot;wine glass&quot;, &quot;cup&quot;, &quot;fork&quot;, &quot;knife&quot;, &quot;spoon&quot;,
    &quot;bowl&quot;, &quot;banana&quot;, &quot;apple&quot;, &quot;sandwich&quot;, &quot;orange&quot;, &quot;broccoli&quot;, &quot;carrot&quot;, &quot;hot dog&quot;, &quot;pizza&quot;, &quot;donut&quot;,
    &quot;cake&quot;, &quot;chair&quot;, &quot;couch&quot;, &quot;potted plant&quot;, &quot;bed&quot;, &quot;dining table&quot;, &quot;toilet&quot;, &quot;tv&quot;, &quot;laptop&quot;, &quot;mouse&quot;,
    &quot;remote&quot;, &quot;keyboard&quot;, &quot;cell phone&quot;, &quot;microwave&quot;, &quot;oven&quot;, &quot;toaster&quot;, &quot;sink&quot;, &quot;refrigerator&quot;, &quot;book&quot;,
    &quot;clock&quot;, &quot;vase&quot;, &quot;scissors&quot;, &quot;teddy bear&quot;, &quot;hair drier&quot;, &quot;toothbrush&quot;
]
</code></pre>
<p>In case if you use other custom trained model, then you can get this array from the YAML file, that used for training. You can find about YAML files that used to train YOLOv8 models in my <a href="https://dev.to/andreygermanov/a-practical-introduction-to-object-detection-with-yolov8-neural-network-3n8c#prepare_data">previous article</a>.</p>
<p>Then you can just get a class label by ID:</p>
<pre><code>label = yolo_classes[class_id]
</code></pre>
<p>This is how you should parse each row of YOLOv8 model output.</p>
<p>However, this probability is too low, because 2.503395e-06 = 2.503395 / 1000000 = 0.000002503. So, this bounding box, perhaps just garbage that should be filtered out. I recommend filtering out all bounding boxes with probability less than 0.5.</p>
<p>Let’s write all the row parsing code above as a function, to parse any row this way:</p>
<pre><code>def parse_row(row):
    xc,yc,w,h = row[:4]
    x1 = (xc-w/2)/640*img_width
    y1 = (yc-h/2)/640*img_height
    x2 = (xc+w/2)/640*img_width
    y2 = (yc+h/2)/640*img_height
    prob = row[4:].max()
    class_id = row[4:].argmax()
    label = yolo_classes[class_id]
    return [x1,y1,x2,y2,label,prob]
</code></pre>
<p>Now you can write code that parses and filters out all rows from the output:</p>
<pre><code>boxes = [row for row in [parse_row(row) for row in output] if row[5]&gt;0.5]
len(boxes)
</code></pre>
<pre><code>20
</code></pre>
<p>Here I used the Python list comprehensions. The internal list:</p>
<pre><code>[parse_row(row) for row in output]
</code></pre>
<p>used to parse each row and return an array of parsed rows in<br />
a format [x1,y1,x2,y2,label,prob].</p>
<p>and then, the external list comprehension is used to filter out all of these rows whose probability is less than 0.5</p>
<pre><code>[row for row in [((parsed_rows))] if row[5]&gt;0.5]
</code></pre>
<p>After this, the <code>len(boxes)</code> shows that only 20 boxes left after filtering. Much closer to expected result than 8400, but still it’s too much, because we have an image with only one cat and one dog. Curious, what else detected? Let’s show this data:</p>
<pre><code>[261.28302669525146, 95.53291285037994, 461.15666942596437, 313.4492515325546, 'dog', 0.9220365]
[261.16701192855834, 95.61400711536407, 460.9202187538147, 314.0579136610031, 'dog', 0.92195505]
[261.0219168663025, 95.50403118133545, 460.9265221595764, 313.81584787368774, 'dog', 0.9269446]
[260.7873046875, 95.70514416694641, 461.4101188659668, 313.7423722743988, 'dog', 0.9269207]
[139.5556526184082, 169.4101345539093, 255.12585411071777, 314.7275745868683, 'cat', 0.8986903]
[139.5316062927246, 169.63674533367157, 255.05698356628417, 314.6878091096878, 'cat', 0.90628827]
[139.68495998382568, 169.5753903388977, 255.12413234710692, 315.06962299346924, 'cat', 0.88975877]
[261.1445414543152, 95.70124578475952, 461.0543995857239, 313.6095304489136, 'dog', 0.926944]
[260.9405124664307, 95.77976751327515, 460.99450263977053, 313.57664155960083, 'dog', 0.9247296]
[260.49400663375854, 95.79500484466553, 461.3895306587219, 313.5762457847595, 'dog', 0.9034922]
[139.59658827781678, 169.2822597026825, 255.2673086643219, 314.9018738269806, 'cat', 0.88215613]
[139.46405625343323, 169.3733571767807, 255.28112654685975, 314.9132820367813, 'cat', 0.8780577]
[139.633131980896, 169.65343713760376, 255.49261894226075, 314.88970375061035, 'cat', 0.8653987]
[261.18754177093507, 95.68838310241699, 461.0297842025757, 313.1688747406006, 'dog', 0.9215225]
[260.8274451255798, 95.74608707427979, 461.32597131729125, 313.3906273841858, 'dog', 0.9093932]
[260.5131794929504, 95.89693665504456, 461.3481791496277, 313.24405217170715, 'dog', 0.8848127]
[139.4986301422119, 169.38371658325195, 255.34583129882813, 314.9019331932068, 'cat', 0.836439]
[139.55282192230223, 169.58951950073242, 255.61378440856933, 314.92880630493164, 'cat', 0.87574947]
[139.65414333343506, 169.62119138240814, 255.79856758117677, 315.1192432641983, 'cat', 0.8512477]
[139.86577434539797, 169.38782274723053, 255.5904968261719, 314.77193105220795, 'cat', 0.8271704]
</code></pre>
<p>All these boxes have high probability and their coordinates overlap each other. Let’s draw these boxes on the image to see why this is.</p>
<p>The <code>PIL</code> package has the <code>ImageDraw</code> module, that allows to draw rectangles or other figures on top of images. Let’s load the image using this object:</p>
<pre><code>from PIL import ImageDraw
img = Image.open(&quot;cat_dog.jpg&quot;)
draw = ImageDraw.Draw(img)
</code></pre>
<p>and draw each bounding box on the image using the created <code>draw</code> object in a loop:</p>
<pre><code>for box in boxes:
    x1,y1,x2,y2,class_id,prob = box
    draw.rectangle((x1,y1,x2,y2),None,&quot;#00ff00&quot;)

img
</code></pre>
<p>This code draws the green rectangles for each bounding box and displays the resulting image, which will look like this:</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--H-jJ95dA--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/seng6q3rgczha3aov7j8.png"><img src="https://gitcode.net/dnrops/blog_images/-/raw/main/all_imgs/7a40ed1249ce4bcb92ccd84faaf75c3e~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.png#??w=599&amp;h=402&amp;s=30731&amp;e=jpg&amp;b=657f1f" alt="Image description" /></a></p>
<p>It draws all these 20 boxes on top of each other, so they look like just 2 boxes. As a human, you can see that all these 20 boxes belong to the same 2 objects. However, the neural network is not a human, and it thinks that it found 20 different cats and dogs that overlap each other, because it’s theoretically possible that different objects on the image can overlap each other. Perhaps it sounds crazy, but this is how it works.</p>
<p>It’s up to you to select which of these boxes should stay and which to filter out. How can you do this? On the one hand, you can select the box with the highest probability for dog and the box with the highest probability for cat and remove all others. However, it’s not a useful solution for all cases, because you can have images with several dogs and several cats at the same time. You should find and use some general purpose algorithm that removes all boxes that closely overlap each other. Fortunately, this algorithm already exists and it’s called the <a href="https://towardsdatascience.com/non-maximum-suppression-nms-93ce178e177c">Non-maximum suppression</a>. These are the steps that you should implement to make it work:</p>
<ol>
<li>Create an empty resulting array that will contain a list of boxes that you want to keep.</li>
<li>Start a loop</li>
<li>From source boxes array, select the box with the highest probability and move it to the resulting array.</li>
<li>Compare the selected box with each other box from the source array and remove all of them that overlap the selected one too much.</li>
<li>If the source array contains more boxes, move to step 2 and repeat</li>
</ol>
<p>After loop finished, the source boxes array will be empty, and the resulting array will contain only different boxes. Now let’s understand how to implement step 4, how to compare two boxes and find that they overlap each other too much. To find it, we will use other algorithm - “Intersection over Union” or IoU. This algorithm is actually a formula:</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--CM9y-I1f--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/9w6l8zv1xk7erwwh4a69.png"><img src="https://gitcode.net/dnrops/blog_images/-/raw/main/all_imgs/a3532a2cdf284f9d8d50ef4fe2ec8ca6~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.png#??w=450&amp;h=343&amp;s=10765&amp;e=png&amp;b=ffffff" alt="Image description" /></a></p>
<p>The idea of this algorithm is:</p>
<ol>
<li>Calculate the area of intersection of two boxes.</li>
<li>Calculate the area of their union.</li>
<li>Divide first by second.</li>
</ol>
<p>The closer the result to 1, the more two boxes overlap each other. You can see this visually: the closer the area of intersection of two boxes to the area of their union, the more it looks like the same box. In the left box below the formula these boxes overlap each other, but not too much, and the IoU in this case could be about 0.3. Definitely, these two boxes can be treated as different objects, even if they overlap. On the second example it’s clear that the area of intersection is much closer to the area of their union, perhaps the IoU will be about 0.8 here. Highly likely that one of these boxes should be removed. Finally, the boxes on the right sample represent almost the same area and definitely only one of them should stay.</p>
<p>Now let’s implement both IoU and Non-Maximum suppression in code.</p>
<p><a href=""></a></p>
<h3 id="intersection-over-union"><a class="header" href="#intersection-over-union"><a href="#intersection-over-union"></a>Intersection over union</a></h3>
<p>1 Calculate the area of intersection</p>
<pre><code>def intersection(box1,box2):
    box1_x1,box1_y1,box1_x2,box1_y2 = box1[:4]
    box2_x1,box2_y1,box2_x2,box2_y2 = box2[:4]
    x1 = max(box1_x1,box2_x1)
    y1 = max(box1_y1,box2_y1)
    x2 = min(box1_x2,box2_x2)
    y2 = min(box1_y2,box2_y2)
    return (x2-x1)*(y2-y1) 
</code></pre>
<p>Here, we calculate the area of intersection rectangle using its width (x2-x1) and height (y2-y1).</p>
<p>2 Calculate the area of union</p>
<pre><code>def union(box1,box2):
    box1_x1,box1_y1,box1_x2,box1_y2 = box1[:4]
    box2_x1,box2_y1,box2_x2,box2_y2 = box2[:4]
    box1_area = (box1_x2-box1_x1)*(box1_y2-box1_y1)
    box2_area = (box2_x2-box2_x1)*(box2_y2-box2_y1)
    return box1_area + box2_area - intersection(box1,box2)
</code></pre>
<p>3 Divide first by second</p>
<pre><code>def iou(box1,box2):
    return intersection(box1,box2)/union(box1,box2)
</code></pre>
<p><a href=""></a></p>
<h3 id="non-maximum-suppression"><a class="header" href="#non-maximum-suppression"><a href="#nonmaximum-suppression"></a>Non-maximum suppression</a></h3>
<p>So, we have an array of boxes in the <code>boxes</code> variable, and we need to leave only different items in it, using the created <code>iou</code> function as a criterion of difference. Let’s say that if the <code>IoU</code> of two boxes is less than 0.7, then they both should stay. Otherwise, the one with the lesser probability should be removed. Let’s implement it:</p>
<pre><code>boxes.sort(key=lambda x: x[5], reverse=True)
result = []
while len(boxes)&gt;0:
    result.append(boxes[0])
    boxes = [box for box in boxes if iou(box,boxes[0])&lt;0.7]
</code></pre>
<p>For convenience, in the first line, we sorted all boxes by probability in reverse order to move the boxes with the highest probabilities to the top.</p>
<p>Then the code defines the array for resulting boxes. In a loop it puts the first box (which is a box with the highest probability) in the resulting array and on the next line it overwrites the boxes array with only boxes, that have the ‘IoU’ with selected box that is less than 0.7.</p>
<p>It continues doing that in a loop until the <code>boxes</code> contains no items.</p>
<p>After running it, you can print the <code>result</code> array:</p>
<pre><code>print(result)
</code></pre>
<pre><code>[
[261.0219168663025, 95.50403118133545, 460.9265221595764, 313.81584787368774, 'dog', 0.9269446],
[139.5316062927246, 169.63674533367157, 255.05698356628417, 314.6878091096878, 'cat', 0.90628827]
]
</code></pre>
<p>Now it has just 2 items, as it should. The IoU did its magic work and selected the best boxes for cat and dog with the highest probabilities.</p>
<p>So, finally, you did it! Can you realize how much code you had to write instead of single <code>model.predict()</code> line in Ultralytics API? However, now you have a knowledge how it really works, and awareness of these algorithms makes you independent of PyTorch environment. Now you can create applications which use the YOLOv8 models using any programming language supported by ONNX and I will show you how to do this.</p>
<p>In the next sections we will refactor the object detection web service, written in the previous article, to use ONNX instead of PyTorch. We will rewrite it on Python, Julia, Node.js, JavaScript, Go and Rust.</p>
<p>The first section with Python defines the project structure, the functions, and their relations, and then we will rewrite all these functions in other programming languages without changing the structure of the project.</p>
<p>The Python section is recommended for everyone, then you can move on to sections related to your chosen language. Using the defined project structure and algorithms, you will be able to write the web service on any other language, that supports ONNX.</p>
<p>I assume that you are familiar with all languages that you choose and have all required IDE’s and tools to write, compile and run that code. I will focus only on ONNX and algorithms, described above, and will not teach you programming on these languages. Furthermore, I will not dive to their standard libraries. However, I will provide links to API docs of all external packages and frameworks that we will use, and you should either know APIs of these libraries or be able to learn them using that documentation.</p>
<p><a href=""></a></p>
<h1 id="create-a-web-service-on-python"><a class="header" href="#create-a-web-service-on-python"><a href="#create-a-web-service-on-python"></a>Create a web service on Python</a></h1>
<p><a href=""></a></p>
<h2 id="setup-the-project"><a class="header" href="#setup-the-project"><a href="#setup-the-project"></a>Setup the project</a></h2>
<p>We will use the project, created in the <a href="https://dev.to/andreygermanov/a-practical-introduction-to-object-detection-with-yolov8-neural-network-3n8c">previous article</a> as a base. You can get it from <a href="https://github.com/AndreyGermanov/yolov8_pytorch_python">this</a> repository.</p>
<p>Create a new folder and copy the following files to it from the project above:</p>
<ul>
<li><code>index.html</code> - frontend</li>
<li><code>object_detector.py</code> - backend</li>
<li><code>requirements.txt</code> - list of external dependencies</li>
</ul>
<p>also copy the ONNX model <code>yolov8m.onnx</code> that you exported in the <a href="#export_onnx">beginning of the article</a>.</p>
<p>Then, open the <code>requirements.txt</code> file and replace the <code>ultralytics</code> dependence to <code>onnxruntime</code>. Also, add the <code>numpy</code> package to the list. It will be used to convert image to array. Finally, the list of dependencies should look like this:</p>
<p><a href="https://onnxruntime.ai/docs/api/python/api_summary.html">onnxruntime</a><br />
<a href="https://flask.palletsprojects.com/en/2.3.x/">flask</a><br />
<a href="https://flask.palletsprojects.com/en/2.3.x/deploying/waitress/">waitress</a><br />
<a href="https://pillow.readthedocs.io/en/stable/">pillow</a><br />
<a href="https://numpy.org/doc/stable/">numpy</a></p>
<p>Ensure that all these packages are installed: you can install them one by one using PIP, or the better option is to install them all at once:</p>
<pre><code>pip install -r requirements.txt
</code></pre>
<p>We will not change frontend, so <code>index.html</code> will stay the same. The only file that we will change is the <code>object_detector.py</code>, where we will rewrite the object detection code, that previously used Ultralytics APIs to use ONNX runtime.</p>
<p>Let’s make a few changes to the structure of this file:</p>
<pre><code>import onnxruntime as ort
from flask import request, Flask, jsonify
from waitress import serve
from PIL import Image
import numpy as np
import json

app = Flask(__name__)


def main():
    serve(app, host='0.0.0.0', port=8080)


@app.route(&quot;/&quot;)
def root():
    with open(&quot;index.html&quot;) as file:
        return file.read()


@app.route(&quot;/detect&quot;, methods=[&quot;POST&quot;])
def detect():
    buf = request.files[&quot;image_file&quot;]
    boxes = detect_objects_on_image(buf.stream)
    return jsonify(boxes)


def detect_objects_on_image(buf):
    model = YOLO(&quot;best.pt&quot;)
    results = model.predict(buf)
    result = results[0]
    output = []
    for box in result.boxes:
        x1, y1, x2, y2 = [
            round(x) for x in box.xyxy[0].tolist()
        ]
        class_id = box.cls[0].item()
        prob = round(box.conf[0].item(), 2)
        output.append([
            x1, y1, x2, y2, result.names[class_id], prob
        ])
    return output


main()
</code></pre>
<p>If you compare this listing with the original <code>object_detector.py</code>, you’ll see that I removed the <code>ultralytics</code> package and put the line that imports the ONNX runtime: <code>import onnxruntime as ort</code>. Also, I’ve imported numpy as <code>np</code>.</p>
<p>Then, I put the code that runs a web server to the <code>main</code> function and put it to the beginning. Finally, I call the main() as a last line.</p>
<p>We will not change the routes inside the main function, so the <code>root</code> and <code>detect</code> functions will remain the same. We will rewrite only the <code>detect_objects_on_image</code> to use ONNX runtime instead of Ultralytics. The implementation will be more complex than now, but you already know everything if you followed the previous section of this article.</p>
<p>We will split the <code>detect_objects_on_image</code> function into three parts:</p>
<ul>
<li>Prepare the input</li>
<li>Run the model</li>
<li>Process the output</li>
</ul>
<p>Each phase we will put to a separate function, which the <code>detect_objects_on_image</code> will call. Replace the content of this function to the following:</p>
<pre><code>def detect_objects_on_image(buf):
    input, img_width, img_height = prepare_input(buf)
    output = run_model(input)
    return process_output(output,img_width,img_height)

def prepare_input(buf):
    pass

def run_model(input):
    pass

def process_output(output,img_width,img_height):
    pass
</code></pre>
<ul>
<li>In the first line, the <code>prepare_input</code> function receives the uploaded file content, converts it to the <code>input</code> array and returns it. In addition, it returns the original dimensions of the image: <code>img_width</code> and <code>img_height</code>, which will be used later to scale detected bounding boxes.</li>
<li>Then, the <code>run_model</code> function receives the <code>input</code> and runs the ONNX session with it. It returns the <code>output</code> which is an array with (1,84,8400) shape.</li>
<li>Finally, the <code>output</code> passed to the <code>process_output</code> function, along with the original image size (<code>img_width</code>, <code>img_height</code>). This function should return the array of bounding boxes. Each item of this array has the following format: <code>[x1,y1,x2,y2,class_label,prob]</code>.</li>
</ul>
<p>Let’s write these functions one by one.</p>
<p><a href=""></a></p>
<h2 id="prepare-the-input-1"><a class="header" href="#prepare-the-input-1"><a href="#prepare-the-input"></a>Prepare the input</a></h2>
<p>The <code>prepare_input</code> function uses the code that you have written in the <a href="#prepare_the_input">Prepare the input</a> section. This is how it looks:</p>
<pre><code>def prepare_input(buf):
    img = Image.open(buf)
    img_width, img_height = img.size
    img = img.resize((640, 640))
    img = img.convert(&quot;RGB&quot;)
    input = np.array(img)
    input = input.transpose(2, 0, 1)
    input = input.reshape(1, 3, 640, 640) / 255.0
    return input.astype(np.float32), img_width, img_height
</code></pre>
<ul>
<li>This code loads the image, saves its size to <code>img_width</code> and <code>img_height</code> variables.</li>
<li>Then it resizes it, removes the transparency by converting to RGB, and converts to a tensor of pixels by loading as an <code>np.array()</code>.</li>
<li>Then it transposes and reshapes the array to convert it from (640,640,3) shape to the (1,3,640,640) shape, divides all values by 255.0 to scale it and make compatible with ONNX model input format.</li>
<li>Finally, it returns the input array converted to “Float32” data type along with original <code>img_width</code> and <code>img_height</code>. It’s important here to convert to <code>np.float32</code>, because by default, Python uses the <code>double</code> as a type for floating point numbers, but ONNX runtime model requires the Float32.</li>
</ul>
<p><a href=""></a></p>
<h2 id="run-the-model-1"><a class="header" href="#run-the-model-1"><a href="#run-the-model"></a>Run the model</a></h2>
<p>In this function you can reuse the code, that we wrote in the <a href="#run_the_model">Run the model</a> section.</p>
<pre><code>def run_model(input):
    model = ort.InferenceSession(&quot;yolov8m.onnx&quot;, providers=['CPUExecutionProvider'])
    outputs = model.run([&quot;output0&quot;], {&quot;images&quot;:input})
    return outputs[0]
</code></pre>
<p>First, you load the model from the <code>yolov8m.onnx</code> file and then use the <code>run</code> method to process the <code>input</code> and return the outputs. Finally, it returns the first output which is an array of (1,84,8400) shape.</p>
<p>Now, it’s time to process and convert this output to the array of bounding boxes.</p>
<p><a href=""></a></p>
<h2 id="process-the-output-1"><a class="header" href="#process-the-output-1"><a href="#process-the-output"></a>Process the output</a></h2>
<p>The code to process the output will include the functions from the <a href="#process_the_output">Process the output</a> section to filter out all overlapping boxes using the “Intersection over Union” algorithm. Also, it will use the array of YOLO classes to obtain the labels for each detected object. This code you can just copy/paste from the <a href="#iou">appropriate places</a>:</p>
<pre><code>def iou(box1,box2):
    return intersection(box1,box2)/union(box1,box2)

def union(box1,box2):
    box1_x1,box1_y1,box1_x2,box1_y2 = box1[:4]
    box2_x1,box2_y1,box2_x2,box2_y2 = box2[:4]
    box1_area = (box1_x2-box1_x1)*(box1_y2-box1_y1)
    box2_area = (box2_x2-box2_x1)*(box2_y2-box2_y1)
    return box1_area + box2_area - intersection(box1,box2)

def intersection(box1,box2):
    box1_x1,box1_y1,box1_x2,box1_y2 = box1[:4]
    box2_x1,box2_y1,box2_x2,box2_y2 = box2[:4]
    x1 = max(box1_x1,box2_x1)
    y1 = max(box1_y1,box2_y1)
    x2 = min(box1_x2,box2_x2)
    y2 = min(box1_y2,box2_y2)
    return (x2-x1)*(y2-y1)

yolo_classes = [
    &quot;person&quot;, &quot;bicycle&quot;, &quot;car&quot;, &quot;motorcycle&quot;, &quot;airplane&quot;, &quot;bus&quot;, &quot;train&quot;, &quot;truck&quot;, &quot;boat&quot;,
    &quot;traffic light&quot;, &quot;fire hydrant&quot;, &quot;stop sign&quot;, &quot;parking meter&quot;, &quot;bench&quot;, &quot;bird&quot;, &quot;cat&quot;, &quot;dog&quot;, &quot;horse&quot;,
    &quot;sheep&quot;, &quot;cow&quot;, &quot;elephant&quot;, &quot;bear&quot;, &quot;zebra&quot;, &quot;giraffe&quot;, &quot;backpack&quot;, &quot;umbrella&quot;, &quot;handbag&quot;, &quot;tie&quot;,
    &quot;suitcase&quot;, &quot;frisbee&quot;, &quot;skis&quot;, &quot;snowboard&quot;, &quot;sports ball&quot;, &quot;kite&quot;, &quot;baseball bat&quot;, &quot;baseball glove&quot;,
    &quot;skateboard&quot;, &quot;surfboard&quot;, &quot;tennis racket&quot;, &quot;bottle&quot;, &quot;wine glass&quot;, &quot;cup&quot;, &quot;fork&quot;, &quot;knife&quot;, &quot;spoon&quot;,
    &quot;bowl&quot;, &quot;banana&quot;, &quot;apple&quot;, &quot;sandwich&quot;, &quot;orange&quot;, &quot;broccoli&quot;, &quot;carrot&quot;, &quot;hot dog&quot;, &quot;pizza&quot;, &quot;donut&quot;,
    &quot;cake&quot;, &quot;chair&quot;, &quot;couch&quot;, &quot;potted plant&quot;, &quot;bed&quot;, &quot;dining table&quot;, &quot;toilet&quot;, &quot;tv&quot;, &quot;laptop&quot;, &quot;mouse&quot;,
    &quot;remote&quot;, &quot;keyboard&quot;, &quot;cell phone&quot;, &quot;microwave&quot;, &quot;oven&quot;, &quot;toaster&quot;, &quot;sink&quot;, &quot;refrigerator&quot;, &quot;book&quot;,
    &quot;clock&quot;, &quot;vase&quot;, &quot;scissors&quot;, &quot;teddy bear&quot;, &quot;hair drier&quot;, &quot;toothbrush&quot;
]
</code></pre>
<p>This is the <code>iou</code> function and its dependencies to calculate the <code>intersection</code> over <code>union</code> coefficient. Also, there is an array of YOLO classes that the model can detect.</p>
<p>Now, having all that, you can implement the <code>process_output</code> function:</p>
<pre><code>def process_output(output, img_width, img_height):
    output = output[0].astype(float)
    output = output.transpose()

    boxes = []
    for row in output:
        prob = row[4:].max()
        if prob &lt; 0.5:
            continue
        class_id = row[4:].argmax()
        label = yolo_classes[class_id]
        xc, yc, w, h = row[:4]
        x1 = (xc - w/2) / 640 * img_width
        y1 = (yc - h/2) / 640 * img_height
        x2 = (xc + w/2) / 640 * img_width
        y2 = (yc + h/2) / 640 * img_height
        boxes.append([x1, y1, x2, y2, label, prob])

    boxes.sort(key=lambda x: x[5], reverse=True)
    result = []
    while len(boxes) &gt; 0:
        result.append(boxes[0])
        boxes = [box for box in boxes if iou(box, boxes[0]) &lt; 0.7]
    return result
</code></pre>
<ul>
<li>First two lines convert the output shape from (1,84,8400) to (8400,84) which is 8400 rows with 84 columns. Also, it converts the values of array from <code>np.float32</code> to <code>float</code> data type. It’s required to serialize result to JSON finally.</li>
<li>The first loop used to go through the rows. For each row, it calculates the probability of this prediction and skips all rows if the probability less than 0.5.</li>
<li>For rows that passed the probability check, it determines the detected object <code>class_id</code> and the text <code>label</code> of this class, using the <code>yolo_classes</code> array.</li>
<li>Then it calculates the corner coordinates of the bounding box using coordinates of its center, width and height. Also, it scales it to the original image size using the <code>img_width</code> and <code>img_height</code> parameters.</li>
<li>Then it appends the calculated bounding box to the <code>boxes</code> array.</li>
<li>The last part of the function filters the detected <code>boxes</code> using the “Non-maximum suppression” algorithm. It filters all boxes that overlap the box with the highest probability, using the <code>iou</code> function to determine the overlapping criteria value.</li>
<li>Finally, all boxes that passed the filter returned as a <code>result</code> array.</li>
</ul>
<p>That is it for Python implementation.</p>
<p>If everything implemented without mistakes, you can run this web service this way:</p>
<pre><code>python object_detector.py
</code></pre>
<p>then open <code>http://localhost:8080</code> in a web browser, and it should work exactly the same, as an original service, implemented using the PyTorch version of YOLOv8 model.</p>
<p>The ONNX runtime is a low level library, so it requires much more code to make the model work, however, the solution built this way is better to deploy in production, because it requires 10 times less hard disk space.</p>
<p>You can find the whole project with comments in <a href="https://github.com/AndreyGermanov/yolov8_onnx_python">this GitHub repository</a>.</p>
<p>The code that we developed here is oversimplified. It intended only to demonstrate how to load and run the YOLOv8 models using ONNX runtime. It does not include any error processing and exception handling. These tasks depend on real use cases, and it’s up to you how to implement it for your projects.</p>
<p>We used only a small subset of ONNX runtime Python API required for basic operations. Full reference available <a href="https://onnxruntime.ai/docs/get-started/with-python.html">here</a>.</p>
<p>If you followed this guide step by step and implemented this web service on Python, then by this moment you know the foundational algorithm on how the ONNX runtime works in general and ready to try implementing this on other languages.</p>
<p>In the sections below, we will implement the same projects with the same functions on other programming languages. If curious, you can read all next sections or move directly to the language that is interesting for you the most.</p>
<p><a href=""></a></p>
<h1 id="create-a-web-service-on-julia"><a class="header" href="#create-a-web-service-on-julia"><a href="#create-a-web-service-on-julia"></a>Create a web service on Julia</a></h1>
<p>Julia is a modern programming language well suited for data science and machine learning. It combines simple syntax with superfast runtime performance. Sometimes it’s stated as a future of machine learning and the most natural replacement for Python in this field.</p>
<p>The Julia has good libraries for machine learning and deep learning. You can read my articles which introduces these libraries to create and run <a href="https://dev.to/andreygermanov/machine-learning-with-julia-solve-titanic-competition-on-kaggle-and-deploy-trained-ai-model-as-a-web-service-f9l">classical machine learning</a> models and <a href="https://dev.to/andreygermanov/deep-learning-with-julia-how-to-build-and-train-a-model-using-a-neural-network-40pe">neural networks</a>.</p>
<p>Furthermore, having a binding to the ONNX runtime library, you can use any machine learning model, created using Python, including neural networks, created in PyTorch and TensorFlow. The YOLOv8 is not an exception, and you can run those models, exported to ONNX format, in Julia.</p>
<p>Below, we will implement the same object detection project on Julia.</p>
<p><a href=""></a></p>
<h2 id="setup-the-project-1"><a class="header" href="#setup-the-project-1"><a href="#setup-the-project"></a>Setup the project</a></h2>
<p>Enter the Julia REPL by running the following command:</p>
<pre><code>julia
</code></pre>
<p>In the REPL, switch to <code>pkg</code> mode by pressing the <code>]</code> key and then, enter this command:</p>
<pre><code>generate object_detector
</code></pre>
<p>This command will create a folder <code>object_detector</code> and will generate the new project in it.</p>
<p>Enter the shell mode by pressing the <code>;</code> key and move to the project folder by running the following command:</p>
<pre><code>cd object_detector
</code></pre>
<p>Return to the <code>pkg</code> mode by pressing <code>Esc</code> and then press the <code>]</code> key. Then exec this command to activate the project:</p>
<pre><code>activate .
</code></pre>
<p>Then you need to install dependencies that will be used. They are ONNX runtime, the <code>Images</code> package and the Genie web framework.</p>
<pre><code>add ONNXRunTime
add Images
add Genie
</code></pre>
<ul>
<li><a href="https://github.com/jw3126/ONNXRunTime.jl">ONNXRuntime</a> - this is the Julia bindings for ONNX runtime library.</li>
<li><a href="https://juliaimages.org/latest/">Images</a> - this is the Julia Images package, which we will use to read images and convert them to pixel color arrays.</li>
<li><a href="https://geniejl.readthedocs.io/en/latest/">Genie</a> - this is a web framework for Julia, similar to Flask in Python.</li>
</ul>
<p>Then you can exit the Julia REPL by pressing <code>Ctrl+D</code>.</p>
<p>Open the project folder to see what is there:</p>
<ul>
<li><code>src</code> - the folder with Julia source code</li>
<li><code>Project.toml</code> - the project properties file</li>
<li><code>Manifest.toml</code> - the project package cache file</li>
</ul>
<p>Also, it already generated the template source code file <code>object_detector.jl</code> in the <code>src</code> folder. In this file we will do all the work. However, before we start, copy the <code>index.html</code> and the <code>yolov8m.onnx</code> files from Python project to this project root. The frontend will be the same.</p>
<p>After you’ve done that, open the <code>src/object_detector.jl</code>, erase all content from it and add the following boilerplate code:</p>
<pre><code>using Images, ONNXRunTime, Genie, Genie.Router, Genie.Requests, Genie.Renderer.Json

function main()    
    route(&quot;/&quot;) do 
        String(read(&quot;index.html&quot;))
    end 

    route(&quot;/detect&quot;, method=POST) do
        buf = IOBuffer(filespayload()[&quot;image_file&quot;].data)
        json(detect_objects_on_image(buf))
    end

    up(8080, host=&quot;0.0.0.0&quot;, async=false)
end

function detect_objects_on_image(buf)
    input, img_width, img_height = prepare_input(buf)
    output = run_model(input)
    return process_output(output, img_width,img_height)
end

function prepare_input(buf)
end

function run_model(input)
end

function process_output(output, img_width, img_height)
end

main()
</code></pre>
<p>This is a template of the whole application. You can compare this with the <a href="#setup_the_project_python">Python project</a> and see that it has almost the same structure.</p>
<ul>
<li>First you import dependencies, including ONNX Runtime, Genie Web framework and Images library.</li>
<li>Then, in the main function, you create two endpoints: one for main <code>index.html</code> page and one <code>/detect</code>, which will receive the image file and pass it to the <code>detect_objects_on_image</code> function. Then you start the web server on port 8080 which serves these two endpoints.</li>
<li>The <code>detect_objects_on_image</code> has exactly the same content as the Python one. It prepares input from the image, passes it through the model, processes the model output and returns the array of bounding boxes.</li>
<li>Then, the processed output is returned to the client as JSON.</li>
</ul>
<p>In the next sections we will implement <code>prepare_input</code>, <code>run_model</code> and <code>process_output</code> functions one by one.</p>
<p><a href=""></a></p>
<h2 id="prepare-the-input-2"><a href="#prepare-the-input"></a><a class="header" href="#prepare-the-input-2">Prepare the input</a></h2>
<pre><code>function prepare_input(buf)
    img = load(buf)
    img_height, img_width = size(img)
    img = imresize(img,(640,640))
    img = RGB.(img)
    input = channelview(img)
    input = reshape(input,1,3,640,640)
    return Float32.(input), img_width, img_height    
end
</code></pre>
<ul>
<li>This code loads the image, saves its size to <code>img_width</code> and <code>img_height</code> variables.</li>
<li>Then it resizes it, removes the transparency by converting to RGB, and converts to a tensor of pixels using the <code>channelview</code> function.</li>
<li>Then it reshapes the array to convert it from the (3,640,640) shape to the (1,3,640,640) shape that is required by the ONNX model.</li>
<li>Finally, it returns the input array converted to “Float32” data type along with original <code>img_width</code> and <code>img_height</code>.</li>
</ul>
<p><a href=""></a></p>
<h2 id="run-the-model-2"><a href="#run-the-model"></a><a class="header" href="#run-the-model-2">Run the model</a></h2>
<pre><code>function run_model(input)
    model = load_inference(&quot;yolov8m.onnx&quot;)
    outputs = model(Dict(&quot;images&quot; =&gt; input))
    return outputs[&quot;output0&quot;]
end
</code></pre>
<p>This code is almost the same as the <a href="#run_the_model_python">corresponding Python code</a>.</p>
<p>First, you load the model from the <code>yolov8m.onnx</code> file and then run this model to process the <code>input</code> and return the outputs. Finally, it returns the first output which is an array of (1,84,8400) shape.</p>
<p>Now, it’s time to process and convert this output to the array of bounding boxes.</p>
<p><a href=""></a></p>
<h2 id="process-the-output-2"><a href="#process-the-output"></a><a class="header" href="#process-the-output-2">Process the output</a></h2>
<p>The code of the <code>process_output</code> function will use the Intersection Over Union algorithm to filter out all overlapped boxes. It’s easy to rewrite the <a href="#process_the_output_python">iou, intersect and union functions</a> from Python to Julia. Include them to your code below the <code>process_output</code> function:</p>
<pre><code>function iou(box1,box2)
    return intersection(box1,box2) / union(box1,box2)
end

function union(box1,box2)
    box1_x1,box1_y1,box1_x2,box1_y2 = box1[1:4]
    box2_x1,box2_y1,box2_x2,box2_y2 = box2[1:4]
    box1_area = (box1_x2-box1_x1)*(box1_y2-box1_y1)
    box2_area = (box2_x2-box2_x1)*(box2_y2-box2_y1)
    return box1_area + box2_area - intersection(box1,box2)
end

function intersection(box1,box2)
    box1_x1,box1_y1,box1_x2,box1_y2 = box1[1:4]
    box2_x1,box2_y1,box2_x2,box2_y2 = box2[1:4]
    x1 = max(box1_x1,box2_x1)
    y1 = max(box1_y1,box2_y1)
    x2 = min(box1_x2,box2_x2)
    y2 = min(box1_y2,box2_y2)
    return (x2-x1)*(y2-y1)
end
</code></pre>
<p>Also, include the array of YOLOv8 class labels, which will be used to convert class IDs to text labels:</p>
<pre><code>yolo_classes = [
    &quot;person&quot;, &quot;bicycle&quot;, &quot;car&quot;, &quot;motorcycle&quot;, &quot;airplane&quot;, &quot;bus&quot;, &quot;train&quot;, &quot;truck&quot;, &quot;boat&quot;,
    &quot;traffic light&quot;, &quot;fire hydrant&quot;, &quot;stop sign&quot;, &quot;parking meter&quot;, &quot;bench&quot;, &quot;bird&quot;, &quot;cat&quot;, &quot;dog&quot;, &quot;horse&quot;,
    &quot;sheep&quot;, &quot;cow&quot;, &quot;elephant&quot;, &quot;bear&quot;, &quot;zebra&quot;, &quot;giraffe&quot;, &quot;backpack&quot;, &quot;umbrella&quot;, &quot;handbag&quot;, &quot;tie&quot;,
    &quot;suitcase&quot;, &quot;frisbee&quot;, &quot;skis&quot;, &quot;snowboard&quot;, &quot;sports ball&quot;, &quot;kite&quot;, &quot;baseball bat&quot;, &quot;baseball glove&quot;,
    &quot;skateboard&quot;, &quot;surfboard&quot;, &quot;tennis racket&quot;, &quot;bottle&quot;, &quot;wine glass&quot;, &quot;cup&quot;, &quot;fork&quot;, &quot;knife&quot;, &quot;spoon&quot;,
    &quot;bowl&quot;, &quot;banana&quot;, &quot;apple&quot;, &quot;sandwich&quot;, &quot;orange&quot;, &quot;broccoli&quot;, &quot;carrot&quot;, &quot;hot dog&quot;, &quot;pizza&quot;, &quot;donut&quot;,
    &quot;cake&quot;, &quot;chair&quot;, &quot;couch&quot;, &quot;potted plant&quot;, &quot;bed&quot;, &quot;dining table&quot;, &quot;toilet&quot;, &quot;tv&quot;, &quot;laptop&quot;, &quot;mouse&quot;,
    &quot;remote&quot;, &quot;keyboard&quot;, &quot;cell phone&quot;, &quot;microwave&quot;, &quot;oven&quot;, &quot;toaster&quot;, &quot;sink&quot;, &quot;refrigerator&quot;, &quot;book&quot;,
    &quot;clock&quot;, &quot;vase&quot;, &quot;scissors&quot;, &quot;teddy bear&quot;, &quot;hair drier&quot;, &quot;toothbrush&quot;
]
</code></pre>
<p>Now, it’s time to write the <code>process_output</code> function:</p>
<pre><code>function process_output(output, img_width, img_height)
    output = output[1,:,:]
    output = transpose(output)

    boxes = []
    for row in eachrow(output)        
        prob = maximum(row[5:end])
        if prob &lt; 0.5
            continue
        end
        class_id = Int(argmax(row[5:end]))
        label = yolo_classes[class_id]
        xc,yc,w,h = row[1:4]
        x1 = (xc-w/2)/640*img_width
        y1 = (yc-h/2)/640*img_height
        x2 = (xc+w/2)/640*img_width
        y2 = (yc+h/2)/640*img_height
        push!(boxes,[x1,y1,x2,y2,label,prob])
    end

    boxes = sort(boxes, by = item -&gt; item[6], rev=true)
    result = []
    while length(boxes)&gt;0
        push!(result,boxes[1])
        boxes = filter(box -&gt; iou(box,boxes[1])&lt;0.7,boxes)
    end
    return result
end
</code></pre>
<p>Like the <a href="#process_the_output_python">Python version</a>, it consists of three parts.</p>
<ul>
<li>In the first two lines it converts the output array from (1,84,8400) shape to the (8400,84).</li>
<li>The first loop is used to go through the rows. For each row, it calculates the probability of this prediction and skips all rows where the probability is less than 0.5.</li>
<li>For rows that passed the probability check, it determines the <code>class_id</code> of the detected object and the text <code>label</code> of this class, using the <code>yolo_classes</code> array.</li>
<li>Then it calculates the corner coordinates of the bounding box from coordinates of its center, width and height. Also, it scales it to the original image size using the <code>img_width</code> and <code>img_height</code> parameters.</li>
<li>Then it appends the calculated bounding box to the boxes array.</li>
<li>The last part of the function filters the detected <code>boxes</code> using the “Non-maximum suppression” algorithm. It filters all boxes that overlap the box with the highest probability, using the <code>iou</code> function to determine the overlapping criteria value.</li>
<li>Finally, all boxes that passed the filter are returned as the <code>result</code> array.</li>
</ul>
<p>That is it for Julia implementation.</p>
<p>If everything is implemented without mistakes, you can run this web service from the project folder using the following command:</p>
<pre><code>julia src/object_detector.jl
</code></pre>
<p>then open <code>http://localhost:8080</code> in a web browser, and it should work exactly the same as the Python version.</p>
<p>The code that we developed here is oversimplified. It is intended only to demonstrate how to load and run the YOLOv8 models using the ONNX runtime. It does not include any error processing and exception handling. These tasks depend on real use cases, and it’s up to you how to implement them for your projects.</p>
<p>We used only a small subset of ONNX runtime Julia API required for basic operations. Full reference available <a href="https://jw3126.github.io/ONNXRunTime.jl/stable/">here</a>.</p>
<p>You can find the source code of the Julia project in <a href="https://github.com/AndreyGermanov/yolov8_onnx_julia">this repository</a>.</p>
<p><a href=""></a></p>
<h1 id="create-a-web-service-on-nodejs"><a href="#create-a-web-service-on-nodejs"></a><a class="header" href="#create-a-web-service-on-nodejs">Create a web service on Node.js</a></h1>
<p>The Node.js needs no introduction. This is the most used platform to develop server side JavaScript applications, including backends for web services. Obviously, it would be great to have a feature to use neural networks in it. Fortunately, the <a href="https://onnxruntime.ai/docs/get-started/with-javascript.html">ONNX runtime for Node.js</a> opens the door to all machine learning models trained on PyTorch, TensorFlow and other frameworks. The YOLOv8 is not an exception. In this section, I will show how to rewrite our object detection web service on Node.js, using the ONNX runtime.</p>
<p><a href=""></a></p>
<h2 id="setup-the-project-2"><a href="#setup-the-project"></a><a class="header" href="#setup-the-project-2">Setup the project</a></h2>
<p>Create new folder for the project like <code>object_detector</code>, open it and run:</p>
<pre><code>npm init
</code></pre>
<p>to create a new Node.js project. After answering all questions about the project, install the required dependencies:</p>
<pre><code>npm i --save onnxruntime-node
npm i --save express
npm i --save multer
npm i --save sharp
</code></pre>
<ul>
<li><a href="https://onnxruntime.ai/docs/get-started/with-javascript.html">onnxruntime-node</a> - The Node.js library for ONNX Runtime</li>
<li><a href="https://expressjs.com/">express</a> - Express.js web framework</li>
<li><a href="https://github.com/expressjs/multer">multer</a> - Middleware for Express.js to handle file uploads</li>
<li><a href="https://sharp.pixelplumbing.com/">sharp</a> - An image processing library</li>
</ul>
<p>We are not going to change frontend, so you can copy the <code>index.html</code> file from the previous project as is to the folder of this project. Also, copy the model file <code>yolov8m.onnx</code>.</p>
<p>Create a <code>object_detector.js</code> file in which you will write the whole backend. Add the following boilerplate code to it:</p>
<pre><code>const ort = require(&quot;onnxruntime-node&quot;);
const express = require('express');
const multer = require(&quot;multer&quot;);
const sharp = require(&quot;sharp&quot;);
const fs = require(&quot;fs&quot;);

function main() {
    const app = express();
    const upload = multer();

    app.get(&quot;/&quot;, (req,res) =&gt; {
        res.end(fs.readFileSync(&quot;index.html&quot;, &quot;utf8&quot;))
    })

    app.post('/detect', upload.single('image_file'), async function (req, res) {
        const boxes = await detect_objects_on_image(req.file.buffer);
        res.json(boxes);
    });

    app.listen(8080, () =&gt; {
        console.log('Server is listening on port 8080')
    });
}

async function detect_objects_on_image(buf) {
    const [input,img_width,img_height] = await prepare_input(buf);
    const output = await run_model(input);
    return process_output(output,img_width,img_height);
}

async function prepare_input(buf) {

}

async function run_model(input) {

}

async function process_output(output, img_width, img_height) {

}

main()
</code></pre>
<ul>
<li>In the first block of <code>require</code> lines you import all required external modules: <code>ort</code> for ONNX runtime, <code>express</code> for web framework, <code>multer</code> to support file uploads in Express framework, <code>sharp</code> to load the uploaded file as an image and convert it to array of pixel colors and <code>fs</code> to read static files.</li>
<li>In the <code>main</code> function, it creates a new Express web application in the <code>app</code> variable and instantiates the <code>uploads</code> module for it.</li>
<li>Then it defines two routes: the root route that reads and returns a content of the <code>index.html</code> file and the <code>/detect</code> route that used to get uploaded file, to pass it to the <code>detect_objects_on_image</code> function and to return bounding boxes of detected objects to client.</li>
<li>The <code>detect_objects_on_image</code> looks almost the same as in <a href="#setup_the_project_python">Python</a> and <a href="#setup_the_project_julia">Julia</a> projects: first it converts the uploaded file to the array of numbers, passes it to the model, processes the output and returns the array of detected objects.</li>
<li>Then, function stubs for all actions are defined.</li>
<li>Finally, the <code>main()</code> function is called to start a web server on port 8080.</li>
</ul>
<p>The project is ready, and it’s time to implement the <code>prepare_input</code>, <code>run_model</code> and <code>process_output</code> functions one by one.</p>
<p><a href=""></a></p>
<h2 id="prepare-the-input-3"><a href="#prepare-the-input"></a><a class="header" href="#prepare-the-input-3">Prepare the input</a></h2>
<p>We will use the <code>Sharp</code> library to load the image as an array of pixel colors. However, JavaScript does not have such packages as NumPy, which support multidimensional arrays. All arrays in JavaScript are flat. We can make an “array of arrays”, but it’s not a true multidimensional array with a shape. For example, we can’t make the array with shape (3,640,640), which means the array of 3 matrices: the first one for reds, the second one for greens and the third one for blues. Instead, the ONNX runtime for JavaScript requires a flat array with 3*640*640=1228800 elements in which the reds go in the beginning, the greens go next and the blues go at the end. This is the result that the <code>prepare_input</code> function should return. Now let’s do it step by step.</p>
<p>First, let’s do the same actions with image as we did in other languages:</p>
<pre><code>async function prepare_input(buf) {
    const img = sharp(buf);
    const md = await img.metadata();
    const [img_width,img_height] = [md.width, md.height];
    const pixels = await img.removeAlpha()
        .resize({width:640,height:640,fit:'fill'})
        .raw()
        .toBuffer();
</code></pre>
<ul>
<li>It loads the file as an image using <code>sharp</code>.</li>
<li>It saves the original image dimensions to <code>img_width</code> and <code>img_height</code></li>
<li>on the next line, it uses the chain of operations to</li>
<li>remove the transparency channel,</li>
<li>resize the image to 640x640,</li>
<li>return the image as a raw array of pixels to buffer</li>
</ul>
<p>Sharp also can’t return a matrix of pixels because there are no matrices in JavaScript. That is why, now, you have the <code>pixels</code> array, which contains a single-dimensional array of image pixels. Each pixel consists of 3 numbers: R, G, B. There are no rows and columns; pixels just go one after another. To convert it to the required format, you need to convert it to 3 arrays: an array of reds, an array of greens and an array of blues, and then concatenate these 3 arrays into one in which the reds go first, the greens go next and the blues go at the end.</p>
<p>The next image shows what you need to do with the <code>pixels</code> array and return from the function:</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--JOi_a8dK--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/swxmkfyxskoph6nmcfk9.png"><img src="https://gitcode.net/dnrops/blog_images/-/raw/main/all_imgs/131932aef51c4c67bcc7587ce45319f1~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.png#??w=333&amp;h=210&amp;s=3106&amp;e=png&amp;b=ffffff" alt="Image description" /></a></p>
<p>The first step is to create 3 arrays for reds, greens and blues:</p>
<pre><code>const red = [], green = [], blue = [];
</code></pre>
<p>Then, traverse the <code>pixels</code> array and collect numbers to appropriate arrays:</p>
<pre><code>for (let index=0; index&lt;pixels.length; index+=3) {
    red.push(pixels[index]/255.0);
    green.push(pixels[index+1]/255.0);
    blue.push(pixels[index+2]/255.0);
}
</code></pre>
<p>This loop jumps from pixel to pixel with step=3. On each iteration, the <code>index</code> is equal to the red component of the current pixel, the <code>index+1</code> is equal to the green component and the <code>index+2</code> is equal to the blue. As you see, we divide components by 255.0 to scale and put to appropriate arrays.</p>
<p>The only thing that left to do after this, is to concatenate these arrays in correct order and return along with <code>img_width</code> and <code>img_height</code>.</p>
<p>Here is a full code of the <code>prepare_input</code> function:</p>
<pre><code>async function prepare_input(buf) {
    const img = sharp(buf);
    const md = await img.metadata();
    const [img_width,img_height] = [md.width, md.height];
    const pixels = await img.removeAlpha()
        .resize({width:640,height:640,fit:'fill'})
        .raw()
        .toBuffer();

    const red = [], green = [], blue = [];
    for (let index=0; index&lt;pixels.length; index+=3) {
        red.push(pixels[index]/255.0);
        green.push(pixels[index+1]/255.0);
        blue.push(pixels[index+2]/255.0);
    }

    const input = [...red, ...green, ...blue];
    return [input, img_width, img_height];
}
</code></pre>
<p>Perhaps other, less resource-consuming ways exist to convert the <code>pixels</code> array to the required form without temporary arrays (you can try your own options), but I just wanted to be logical and simple in this implementation.</p>
<p>Now, let’s run this input through the YOLOv8 model using the ONNX runtime.</p>
<p><a href=""></a></p>
<h2 id="run-the-model-3"><a href="#run-the-model"></a><a class="header" href="#run-the-model-3">Run the model</a></h2>
<p>The code of the <code>run_model</code> function follows:</p>
<pre><code>async function run_model(input) {
    const model = await ort.InferenceSession.create(&quot;yolov8m.onnx&quot;);
    input = new ort.Tensor(Float32Array.from(input),[1, 3, 640, 640]);
    const outputs = await model.run({images:input});
    return outputs[&quot;output0&quot;].data;
}
</code></pre>
<ul>
<li>On the first line, we load the model from <code>yolov8m.onnx</code> file.</li>
<li>On the second line, we prepare the input array. The ONNX Runtime requires to convert it to an internal <code>ort.Tensor</code> object. Constructor of this object require specifying the flat numbers array, converted to Float32 and a shape, that this array should have, which is as usual [1,3,640,640].</li>
<li>On the third line, we run the model with constructed tensor and receive <code>outputs</code>.</li>
<li>Finally, we return the data of the first output. In JavaScript version, we require specifying the name of this output, instead of index. The name of the YOLOv8 output, as you have seen in the <a href="https://dev.to/andreygermanov/how-to-deploy-yolov8-models-using-python-julia-nodejs-javascript-go-and-rust-173n-temp-slug-998780?preview=638817ecd060acb0e7f313d0c39f51dd5f38ce1a95abcf3830b559fb6d5f0554d62a18861f5bda33f0a10cd976d52b9cf404b210ae651a5c83a21727#run_the_model">beginning of this article</a>, is <code>output0</code>.</li>
</ul>
<p>As a result, the function returns an array with the (1,84,8400) shape, or you can think about it as an 84x8400 matrix. However, JavaScript does not support matrices, which is why it returns the output as a single-dimensional array. The numbers in this array are ordered as 84x8400, but as a flat array of 705600 items. So, you can’t transpose it, and you can’t traverse it by rows in a loop, because it’s required to specify the absolute position of each item. But do not worry: in the next section we will learn how to deal with it.</p>
<p><a href=""></a></p>
<h2 id="process-the-output-3"><a href="#process-the-output"></a><a class="header" href="#process-the-output-3">Process the output</a></h2>
<p>The code of the <code>process_output</code> function will use the Intersection Over Union algorithm to filter out all overlapped boxes. It’s easy to rewrite the <a href="#process_the_output_python">iou, intersect and union functions</a> from Python to JavaScript. Include them to your code below the <code>process_output</code> function:</p>
<pre><code>function iou(box1,box2) {
    return intersection(box1,box2)/union(box1,box2);
}

function union(box1,box2) {
    const [box1_x1,box1_y1,box1_x2,box1_y2] = box1;
    const [box2_x1,box2_y1,box2_x2,box2_y2] = box2;
    const box1_area = (box1_x2-box1_x1)*(box1_y2-box1_y1)
    const box2_area = (box2_x2-box2_x1)*(box2_y2-box2_y1)
    return box1_area + box2_area - intersection(box1,box2)
}

function intersection(box1,box2) {
    const [box1_x1,box1_y1,box1_x2,box1_y2] = box1;
    const [box2_x1,box2_y1,box2_x2,box2_y2] = box2;
    const x1 = Math.max(box1_x1,box2_x1);
    const y1 = Math.max(box1_y1,box2_y1);
    const x2 = Math.min(box1_x2,box2_x2);
    const y2 = Math.min(box1_y2,box2_y2);
    return (x2-x1)*(y2-y1)
}
</code></pre>
<p>also, you will need to find YOLO class label by ID, so add the <code>yolo_classes</code> array to your code:</p>
<pre><code>const yolo_classes = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat',
    'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
    'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase',
    'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
    'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
    'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
    'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
];
</code></pre>
<p>Now let’s implement the <code>process_output</code> function. As mentioned above, the function receives the output as a flat array that is ordered as an 84x8400 matrix. When working in Python, we had NumPy to transform it to 8400x84 and then traverse it in a loop by row. Here, we can’t transform it this way, so we need to traverse it by columns.</p>
<pre><code>boxes=[];
for (index=0;index&lt;8400;index++) {

}
</code></pre>
<p>Moreover, you do not have row indexes and column indexes, but have only absolute indexes. You can only virtually reshape this flat array to 84x8400 matrix in your head and use this representation to calculate these absolute indexes, using those “virtual rows” and “virtual columns”.</p>
<p>Let’s display how the <code>output</code> array looks to clarify this:</p>
<p><a href="https://gitcode.net/dnrops/blog_images/-/raw/main/all_imgs/c2ba516a5d0e464a8e4ed6f0b5388d09~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.png#??w=537&amp;h=537&amp;s=8285&amp;e=png&amp;b=ffffff"><img src="https://gitcode.net/dnrops/blog_images/-/raw/main/all_imgs/ebad3f80349e42e9b75b1374ac93fe7b~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.png#??w=537&amp;h=537&amp;s=8285&amp;e=png&amp;b=ffffff" alt="Image description" /></a></p>
<p>Here we virtually reshaped the <code>output</code> array with 705600 items to a 84x8400 matrix. It has 8400 columns with indexes from 0 to 8399 and 84 rows with indexes from 0 to 83. The absolute indexes of items have written inside boxes. Each detected object represented by a column in this matrix. The first 4 rows of each column with indexes from 0 to 3 are coordinates of the bounding box of the appropriate object: x_center, y_center, width and height. Cells in the other 80 rows, starting from 4 to 83 contain the probabilities that the object belongs to each of the 80 YOLO classes.</p>
<p>I drew this table to understand how to calculate the absolute index of any item in it, knowing the row and column indexes. For example, how do you calculate the index of the first greyed item that stands on row 2 and column 2, which is the bounding box <code>width</code> of the third detected object? If you think about this a little more, you will find that to calculate this you need to multiply the row index by the length of the row (8400) and add the column index to this. Let’s check it: 8400*2+2=16802. Now, let’s calculate the index of the item below it, which is the height of the same object: 8400*3+2=25202. Bingo! Matched again! Finally, let’s check the bottom gray box, which is the probability that object 8399 belongs to class 79 (toothbrush): 8400*83+8398=705598. Great, so you have a formula to calculate the absolute index: <code>8400*row_index+column_index</code>.</p>
<p>Let’s return to our empty loop. Assuming that the <code>index</code> loop counter is an index of current column and that coordinates of bounding box located in rows 0-3 of current column, we can extract them this way:</p>
<pre><code>boxes=[];
for (index=0;index&lt;8400;index++) {
    const xc = output[8400*0+index];
    const yc = output[8400*1+index];
    const w = output[8400*2+index];
    const h = output[8400*3+index];
}
</code></pre>
<p>Then you can calculate the corners of the bounding box and scale them to the size of the original image:</p>
<pre><code>const x1 = (xc-w/2)/640*img_width;
const y1 = (yc-h/2)/640*img_height;
const x2 = (xc+w/2)/640*img_width;
const y2 = (yc+h/2)/640*img_height;
</code></pre>
<p>Now similarly you need to get probabilities of the object, that goes in rows from 4 to 83, find which of them is biggest and the index of this probability, and save these values to the <code>prob</code> and the <code>class_id</code> variables. You can write a nested loop, that traverses rows from 4 to 83 and saves the highest value, and it’s index:</p>
<pre><code>let class_id = 0, prob = 0;
for (let col=4;col&lt;84;col++) {
    if (output[8400*col+index]&gt;prob) {
        prob = output[8400*col+index];
        class_id = col - 4;
    }
}
</code></pre>
<p>It works fine, but I’d better rewrite this in a functional way:</p>
<pre><code>const [class_id,prob] = [...Array(80).keys()]
    .map(col =&gt; [col, output[8400*(col+4)+index]])
    .reduce((accum, item) =&gt; item[1]&gt;accum[1] ? item : accum,[0,0]);
</code></pre>
<ul>
<li>The first line <code>[...Array(80).keys()]</code> generates a range array with numbers from 0 to 79</li>
<li>Then, the <code>map</code> function constructs the array of probabilities for each class_id where each item collected as a <code>[class_id,probability]</code> array</li>
<li>The <code>reduce</code> function reduces the array to a single item, that contains maximum probability and its class id.</li>
<li>This item finally returned and destructured to <code>class_id</code> and <code>prob</code> variables.</li>
</ul>
<p>Then, having the maximum probability and class_id, you can either skip that object, if the probability is less than 0.5 or find the label of this class.</p>
<p>Here is a final code, that processes and collects bounding boxes to the <code>boxes</code> array:</p>
<pre><code>    let boxes = [];
    for (let index=0;index&lt;8400;index++) {
        const [class_id,prob] = [...Array(80).keys()]
            .map(col =&gt; [col, output[8400*(col+4)+index]])
            .reduce((accum, item) =&gt; item[1]&gt;accum[1] ? item : accum,[0,0]);
        if (prob &lt; 0.5) {
            continue;
        }
        const label = yolo_classes[class_id];
        const xc = output[index];
        const yc = output[8400+index];
        const w = output[2*8400+index];
        const h = output[3*8400+index];
        const x1 = (xc-w/2)/640*img_width;
        const y1 = (yc-h/2)/640*img_height;
        const x2 = (xc+w/2)/640*img_width;
        const y2 = (yc+h/2)/640*img_height;
        boxes.push([x1,y1,x2,y2,label,prob]);
    }
</code></pre>
<p>The last step is to filter the <code>boxes</code> array using “Non-maximum suppression”, to exclude all overlapping boxes from it. This code is close to the <a href="#process_the_output_python">Python implementation</a>:</p>
<pre><code>boxes = boxes.sort((box1,box2) =&gt; box2[5]-box1[5])
const result = [];
while (boxes.length&gt;0) {
    result.push(boxes[0]);
    boxes = boxes.filter(box =&gt; iou(boxes[0],box)&lt;0.7);
}
</code></pre>
<ul>
<li>We sort the boxes by probability in reverse order to put the boxes with the highest probability to the top</li>
<li>In a loop, we put the box with the highest probability to <code>result</code></li>
<li>Then we filter out all boxes that overlap the selected box too much (all boxes that have IoU&gt;0.7 with this box)</li>
</ul>
<p>That’s all! For convenience, here is a full code of the <code>process_output</code> function:</p>
<pre><code>function process_output(output, img_width, img_height) {
    let boxes = [];
    for (let index=0;index&lt;8400;index++) {
        const [class_id,prob] = [...Array(80).keys()]
            .map(col =&gt; [col, output[8400*(col+4)+index]])
            .reduce((accum, item) =&gt; item[1]&gt;accum[1] ? item : accum,[0,0]);
        if (prob &lt; 0.5) {
            continue;
        }
        const label = yolo_classes[class_id];
        const xc = output[index];
        const yc = output[8400+index];
        const w = output[2*8400+index];
        const h = output[3*8400+index];
        const x1 = (xc-w/2)/640*img_width;
        const y1 = (yc-h/2)/640*img_height;
        const x2 = (xc+w/2)/640*img_width;
        const y2 = (yc+h/2)/640*img_height;
        boxes.push([x1,y1,x2,y2,label,prob]);
    }

    boxes = boxes.sort((box1,box2) =&gt; box2[5]-box1[5])
    const result = [];
    while (boxes.length&gt;0) {
        result.push(boxes[0]);
        boxes = boxes.filter(box =&gt; iou(boxes[0],box)&lt;0.7);
    }
    return result;
}
</code></pre>
<p>If you like to work with this output in a more convenient “Pythonic” way, there is a <a href="https://www.npmjs.com/package/numjs">NumJS</a> library that emulates NumPy in JavaScript. You can use it to physically reshape the output to 84x8400, then transpose to 8400x84 and then traverse detected objects by row.</p>
<p>However, the option to work with a single-dimension array as with a matrix, described in this section, is the most efficient, because we get all the values we need without additional array transformations. I think that installing an additional external dependency is overkill for this case.</p>
<p>That is it for the Node.js implementation. If you wrote everything correctly, you can start this web service by running the following command:</p>
<pre><code>node object_detector.js
</code></pre>
<p>and open <code>http://localhost:8080</code> in a web browser.</p>
<p>The code that we developed here is oversimplified. It is intended only to demonstrate how to load and run the YOLOv8 models using the ONNX runtime. It does not include any error processing or exception handling. These tasks depend on real use cases, and it’s up to you how to implement them for your projects.</p>
<p>We used only a small subset of the ONNX runtime JavaScript API required for basic operations. The full reference is available <a href="https://onnxruntime.ai/docs/get-started/with-javascript.html">here</a>.</p>
<p>You can find a source code of Node.js object detector web service in <a href="https://github.com/AndreyGermanov/yolov8_onnx_nodejs">this repository</a>.</p>
<p><a href=""></a></p>
<h2 id="create-a-web-service-on-javascript"><a class="header" href="#create-a-web-service-on-javascript"><a href="#create-a-web-service-on-javascript"></a>Create a web service on JavaScript</a></h2>
<p>Could you ever imagine that you can write all the code for an object detector right in the HTML page? Using the ONNX library for JavaScript, you can process the image right in the frontend, without sending it to any server. Furthermore, you can reuse most of the code that we wrote for Node.js because the underlying ONNX runtime API is the same.</p>
<p><a href=""></a></p>
<h2 id="setup-the-project-3"><a class="header" href="#setup-the-project-3"><a href="#setup-the-project"></a>Setup the project</a></h2>
<p>You can reuse the frontend from Node.js project. Create a new folder and copy the <code>index.html</code> and <code>yolov8m.onnx</code> files to it.</p>
<p>Then, open the <code>index.html</code> and add the JavaScript library for ONNX runtime to the head section of the HTML:</p>
<pre><code>&lt;script src=&quot;https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.min.js&quot;&gt;&lt;/script&gt;
</code></pre>
<p>This library exposes the <code>ort</code> global variable, which is the root of the ONNX runtime API. You can use it to instantiate and run models in the same way as we used the <code>ort</code> variable in the Node.js project.</p>
<p>Perhaps in a moment when you read it, the URL to the library will change, so you can look in the <a href="https://onnxruntime.ai/docs/get-started/with-javascript.html#onnx-runtime-web">official documentation</a> for installation instructions.</p>
<p>This is an <code>index.html</code> file that you should have in the beginning:</p>
<pre><code>&lt;!DOCTYPE html&gt;
&lt;html lang=&quot;en&quot;&gt;
&lt;head&gt;
    &lt;meta charset=&quot;UTF-8&quot;&gt;
    &lt;title&gt;YOLOv8 Object Detection&lt;/title&gt;
    &lt;style&gt;
      canvas {
          display:block;
          border: 1px solid black;
          margin-top:10px;
      }
    &lt;/style&gt;
    &lt;script src=&quot;https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.min.js&quot;&gt;&lt;/script&gt;
&lt;/head&gt;
&lt;body&gt;
    &lt;input id=&quot;uploadInput&quot; type=&quot;file&quot;/&gt;
    &lt;canvas&gt;&lt;/canvas&gt;
    &lt;script&gt;

       const input = document.getElementById(&quot;uploadInput&quot;);
       input.addEventListener(&quot;change&quot;,async(event) =&gt; {
           const data = new FormData();
           data.append(&quot;image_file&quot;,event.target.files[0],&quot;image_file&quot;);
           const response = await fetch(&quot;/detect&quot;,{
               method:&quot;post&quot;,
               body:data
           });
           const boxes = await response.json();
           draw_image_and_boxes(event.target.files[0],boxes);
       })

      function draw_image_and_boxes(file,boxes) {
          const img = new Image()
          img.src = URL.createObjectURL(file);
          img.onload = () =&gt; {
              const canvas = document.querySelector(&quot;canvas&quot;);
              canvas.width = img.width;
              canvas.height = img.height;
              const ctx = canvas.getContext(&quot;2d&quot;);
              ctx.drawImage(img,0,0);
              ctx.strokeStyle = &quot;#00FF00&quot;;
              ctx.lineWidth = 3;
              ctx.font = &quot;18px serif&quot;;
              boxes.forEach(([x1,y1,x2,y2,label]) =&gt; {
                  ctx.strokeRect(x1,y1,x2-x1,y2-y1);
                  ctx.fillStyle = &quot;#00ff00&quot;;
                  const width = ctx.measureText(label).width;
                  ctx.fillRect(x1,y1,width+10,25);
                  ctx.fillStyle = &quot;#000000&quot;;
                  ctx.fillText(label, x1, y1+18);
              });
          }
      }
    &lt;/script&gt;
&lt;/body&gt;
&lt;/html&gt;
</code></pre>
<p>To run ONNX runtime in a browser, you need to run the content of this folder on a web server. You can use VS Code embedded web server to run the <code>index.html</code> in it.</p>
<p>When it works, let’s load the image and prepare an input array from it.</p>
<p><a href=""></a></p>
<h1 id="prepare-the-input-4"><a class="header" href="#prepare-the-input-4"><a href="#prepare-the-input"></a>Prepare the input</a></h1>
<p>The user loads the image by using the upload file field to select the image file. This process is implemented in the <code>change</code> event listener:</p>
<pre><code>input.addEventListener(&quot;change&quot;,async(event) =&gt; {
    const data = new FormData();
           data.append(&quot;image_file&quot;,event.target.files[0],&quot;image_file&quot;);
    const response = await fetch(&quot;/detect&quot;,{
        method:&quot;post&quot;,
        body:data
    });
    const boxes = await response.json();
    draw_image_and_boxes(event.target.files[0],boxes);
})
</code></pre>
<p>In this code, you used <code>fetch</code> to post the file from the <code>event.target.files[0]</code> variable to the backend. Then the backend returns the array of bounding boxes, which is decoded to a <code>boxes</code> array.</p>
<p>However, in this version, we will not have a backend to load the image to. All code we will write here, in the <code>index.html</code> file, including the <code>detect_objects_on_image</code> and all other functions. So you need to remove this <code>fetch</code> call and just pass the file to the <code>detect_objects_on_image</code> function:</p>
<pre><code>input.addEventListener(&quot;change&quot;,async(event) =&gt; {
    const boxes = await detect_objects_on_image(event.target.files[0]);
    draw_image_and_boxes(event.target.files[0],boxes);
})
</code></pre>
<p>Then, define the <code>detect_objects_on_image</code> function, which is the same as in <a href="#setup_the_project_nodejs">Node.js example</a>:</p>
<pre><code>async function detect_objects_on_image(buf) {
    const [input,img_width,img_height] = await prepare_input(buf);
    const output = await run_model(input);
    return process_output(output,img_width,img_height);
}
</code></pre>
<p>The only difference here is that <code>buf</code> is a File object that the user selected in the upload file field. You need to load this file as an image in the browser and convert it to an array of pixels. The most common way to load an image in HTML and JavaScript is using the <a href="https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API">HTML5 canvas</a> object. This object loads the image as a flat array of pixel colors, almost the same as the <code>Sharp</code> library loaded it in the <a href="#prepare_the_input_nodejs">Node.js version</a>. We will do this work in the <code>prepare_input</code> function:</p>
<pre><code> async function prepare_input(buf) {
      const img = new Image();
      img.src = URL.createObjectURL(buf);
      img.onload = () =&gt; {
          const [img_width,img_height] = [img.width, img.height]
          const canvas = document.createElement(&quot;canvas&quot;);
          canvas.width = 640;
          canvas.height = 640;
          const context = canvas.getContext(&quot;2d&quot;);
          context.drawImage(img,0,0,640,640);
          const imgData = context.getImageData(0,0,640,640);
          const pixels = imgData.data;
      }
  }
</code></pre>
<ul>
<li>The HTML5 Canvas element can draw the HTML images, that is why, we need to load the file to the <code>Image()</code> object first.</li>
<li>Then, before drawing it on the canvas, we need to ensure that the image is loaded. That is why all subsequent code is written in the <code>onload()</code> event handler of the image object, which executes only after the image is loaded.</li>
<li>We save the original image size to <code>img_width</code> and <code>img_height</code>.</li>
<li>Then we create a <code>canvas</code> object and set its size to 640x640, because this is the size required by the YOLOv8 model.</li>
<li>Then we get the HTML5 canvas drawing <code>context</code> of created canvas to draw the image on the canvas. The <code>drawImage</code> method allows drawing and resize at the same time, that is why we set the size of image on the canvas to 640x640.</li>
<li>Then <a href="https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/getImageData">getImageData()</a> is used to get the <a href="https://developer.mozilla.org/en-US/docs/Web/API/ImageData">imageData</a> object with image pixels.</li>
<li>The only required property of the ImageData object is the <code>data</code> which contains the array of pixels that we need.</li>
</ul>
<p>Now you have the <code>pixels</code> array, which is a one-dimensional array of image pixels. Each pixel consists of 4 numbers that define the color components: R, G, B, A, where R=red, G=green, B=blue and A=transparency (alpha channel). There are no rows and columns in this array, and pixels just go one after another. To convert it to the required format, you need to split it into 3 arrays first: an array of reds, an array of greens and an array of blues, and then concatenate these 3 arrays into one, in which the reds go first, the greens go next and the blues go at the end.</p>
<p>The next image shows what you need to do with the <code>pixels</code> array and return from the function:</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--ZdVS-88N--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/lgpx3iq4yrmpn757mqdo.png"><img src="https://gitcode.net/dnrops/blog_images/-/raw/main/all_imgs/9269c1109900422097ff8bd0302fbb17~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.png#??w=442&amp;h=210&amp;s=3588&amp;e=png&amp;b=ffffff" alt="Image description" /></a></p>
<p>The first step is to create 3 arrays for reds, greens and blues:</p>
<pre><code>const red = [], green = [], blue = [];
</code></pre>
<p>Then, traverse the <code>pixels</code> array and collect numbers to appropriate arrays:</p>
<pre><code>for (let index=0; index&lt;pixels.length; index+=4) {
    red.push(pixels[index]/255.0);
    green.push(pixels[index+1]/255.0);
    blue.push(pixels[index+2]/255.0);
}
</code></pre>
<p>This loop jumps from pixel to pixel with step=4. On each iteration, <code>index</code> points to the red component of the current pixel, <code>index+1</code> to the green component and <code>index+2</code> to the blue one. The fourth component of the color is skipped in this loop. As you see, we divide the components by 255.0 to scale them and put them into the appropriate arrays.</p>
<p>The only thing left to do after this is to concatenate these arrays in the correct order and return them along with <code>img_width</code> and <code>img_height</code>. But we can’t add the <code>return</code> from the <code>prepare_input</code> function here, because we write all this code inside an internal function — the <code>onload</code> event handler — and by writing return, we are just returning from this handler but not from the <code>prepare_input</code> function.</p>
<p>To handle this issue, we wrap the code of the <code>prepare_input</code> function in a <code>Promise</code> and return it. Then, inside the event handler, we will use the <code>resolve([input, img_width, img_height])</code> call to resolve that promise with the results that will be returned.</p>
<p>Here is a full code of the <code>prepare_input</code> function:</p>
<pre><code>async function prepare_input(buf) {
    return new Promise(resolve =&gt; {
        const img = new Image();
        img.src = URL.createObjectURL(buf);
        img.onload = () =&gt; {
            const [img_width,img_height] = [img.width, img.height]
            const canvas = document.createElement(&quot;canvas&quot;);
            canvas.width = 640;
            canvas.height = 640;
            const context = canvas.getContext(&quot;2d&quot;);
            context.drawImage(img,0,0,640,640);
            const imgData = context.getImageData(0,0,640,640);
            const pixels = imgData.data;

            const red = [], green = [], blue = [];
            for (let index=0; index&lt;pixels.length; index+=4) {
                red.push(pixels[index]/255.0);
                green.push(pixels[index+1]/255.0);
                blue.push(pixels[index+2]/255.0);
            }
            const input = [...red, ...green, ...blue];
            resolve([input, img_width, img_height])
        }
    })
}
</code></pre>
<p><a href=""></a></p>
<h2 id="run-the-model-and-process-the-output"><a class="header" href="#run-the-model-and-process-the-output"><a href="#run-the-model-and-process-the-output"></a>Run the model and process the output</a></h2>
<p>This <code>prepare_input</code> function returns the input exactly in the same format as in the Node.js version. That is why, all other code, including <a href="#run_the_model_nodejs">run_model</a>, <a href="#process_the_output_nodejs">process_output, iou, intersection and union</a> functions can be copy/pasted as is from the Node.js project.</p>
<p>After it’s done, the JavaScript web service is finished!</p>
<p>Now you can use any web server to run the <code>index.html</code> file and try this wonderful feature - to run neural network models right in a web browser frontend.</p>
<p>The code that we developed here is oversimplified. It is intended only to demonstrate how to load and run the YOLOv8 models using the ONNX runtime. It does not include any error processing or exception handling. These tasks depend on real use cases, and it’s up to you how to implement them for your projects.</p>
<p>We used only a small subset of the ONNX runtime JavaScript API required for basic operations. The full reference is available <a href="https://onnxruntime.ai/docs/get-started/with-javascript.html">here</a>.</p>
<p>You can find a source code of JavaScript object detector web service in <a href="https://github.com/AndreyGermanov/yolov8_onnx_javascript">this repository</a>.</p>
<p><a href=""></a></p>
<h1 id="create-a-web-service-on-go"><a class="header" href="#create-a-web-service-on-go"><a href="#create-a-web-service-on-go"></a>Create a web service on Go</a></h1>
<p>Go is the first statically typed and compiled programming language in our journey. From my point of view, the greatest thing about Go is how you can deploy the apps written in it. You can compile all your code and its dependencies to a single binary executable, then just copy this file to a production server and run it. This is how the whole deployment process looks in Go. You do not need to install any third-party dependencies to run Go programs, which is why Go applications are usually compact and convenient to update. Also, Go is faster than Python and JavaScript. Definitely, it would be great to have an opportunity to deploy neural networks this way. Fortunately, several ONNX runtime bindings exist that will help us to achieve this goal.</p>
<p><a href=""></a></p>
<h2 id="setup-the-project-4"><a class="header" href="#setup-the-project-4"><a href="#setup-the-project"></a>Setup the project</a></h2>
<p>Create a new folder, enter it and run:</p>
<pre><code>go mod init object_detector
</code></pre>
<p>This command will initialize the <code>object_detector</code> project in the current folder.</p>
<p>Install required external modules:</p>
<pre><code>go get github.com/yalue/onnxruntime_go
go get github.com/nfnt/resize
</code></pre>
<ul>
<li><a href="https://github.com/yalue/onnxruntime_go">github.com/yalue/onnxruntime_go</a> - ONNX runtime library bindings for Golang</li>
<li><a href="https://github.com/nfnt/resize">github.com/nfnt/resize</a> - the library to resize images. (Perhaps you can find more modern library, but I just used this one because it works properly)</li>
</ul>
<p>The other thing for which I respect Go, is that all other modules, including web framework and image processing functions, already exist in standard library.</p>
<p>The ONNX module for Go provides the API, but does not contain the Microsoft ONNX runtime library itself. Instead, it has a function to specify a path in which this library is located. Here you have two options: install the Microsoft ONNX runtime library to a well-known system path, or download the version for your operating system and put it in the project folder. For this project, I will go the second way, to make the project autonomous and independent of the operating system setup.</p>
<p>Go to the Releases page: <a href="https://github.com/microsoft/onnxruntime/releases">https://github.com/microsoft/onnxruntime/releases</a> and download the archive for your operating system. After it’s done, extract the files from the archive and copy all files from the <code>lib</code> subfolder to the project.</p>
<p>We are not going to change the frontend, that is why, just copy the <code>index.html</code> file from one of the previous projects to current folder. Also, copy the <code>yolov8m.onnx</code> model file.</p>
<p>By convention, the main file of Go project should have a <code>main.go</code> name. So, create this file and put the following boilerplate code to it:</p>
<pre><code>package main

import (
    &quot;encoding/json&quot;
    &quot;github.com/nfnt/resize&quot;
    ort &quot;github.com/yalue/onnxruntime_go&quot;
    &quot;image&quot;
    _ &quot;image/gif&quot;
    _ &quot;image/jpeg&quot;
    _ &quot;image/png&quot;
    &quot;io&quot;
    &quot;math&quot;
    &quot;net/http&quot;
    &quot;os&quot;
    &quot;sort&quot;
)

func main() {
    server := http.Server{
    Addr: &quot;0.0.0.0:8080&quot;,
    }
    http.HandleFunc(&quot;/&quot;, index)
    http.HandleFunc(&quot;/detect&quot;, detect)
    server.ListenAndServe()
}

func index(w http.ResponseWriter, _ *http.Request) {
    file, _ := os.Open(&quot;index.html&quot;)
    buf, _ := io.ReadAll(file)
    w.Write(buf)
}

func detect(w http.ResponseWriter, r *http.Request) {
    r.ParseMultipartForm(0)
    file, _, _ := r.FormFile(&quot;image_file&quot;)
    boxes := detect_objects_on_image(file)
    buf, _ := json.Marshal(&amp;boxes)
    w.Write(buf)
}

func detect_objects_on_image(buf io.Reader) [][]interface{} {
    input, img_width, img_height := prepare_input(buf)
    output := run_model(input)
    return process_output(output, img_width, img_height)
}

func prepare_input(buf io.Reader) ([]float32, int64, int64) {

}

func run_model(input []float32) []float32 {

}

func process_output(output []float32, img_width, img_height int64) [][]interface{} {

}
</code></pre>
<p>First, we import required packages. Most of them go from Go standard library:</p>
<ul>
<li><code>encoding/json</code> - to encode bounding boxes to JSON before sending response</li>
<li><code>github.com/nfnt/resize</code> - to resize image to 640x640</li>
<li><code>ort &quot;github.com/yalue/onnxruntime_go&quot;</code> - ONNX runtime library. We import it as <code>ort</code> variable</li>
<li><code>image</code>, <code>image/gif</code>, <code>image/jpeg</code>, <code>image/png</code> - image library and libraries to support images of different formats</li>
<li><code>io</code> - to read data from local files</li>
<li><code>math</code> - for <code>Max</code> and <code>Min</code> functions</li>
<li><code>net/http</code> - to create and run a web server</li>
<li><code>os</code> - to open local files</li>
<li><code>sort</code> - to sort bounding boxes</li>
</ul>
<p>Then, the <code>main</code> function defines two HTTP endpoints: <code>index</code> and <code>detect</code> that are handled by appropriate functions and starts the web server on port 8080 that handles these endpoints.</p>
<p>The <code>index</code> endpoint just returns the content of the <code>index.html</code> file.</p>
<p>The <code>detect</code> endpoint receives the uploaded image file, sends it to the <code>detect_objects_on_image</code> function, which passes it through the YOLOv8 model. Then it receives the array of bounding boxes, encodes them to JSON and returns this JSON to the frontend.</p>
<p>The <code>detect_objects_on_image</code> is the same as in previous projects. The only difference is the type of value that it returns, which is <code>[][]interface{}</code>. The <code>detect_objects_on_image</code> should return an array of bounding boxes. Each bounding box is an array of 6 items (x1,y1,x2,y2,label,probability). These items have different types. However, Go, as a strongly typed programming language, does not allow having an array with items of different types. But it has a special type <code>interface{}</code> which can hold a value of any type. This is a common trick in Go: define a variable using the <code>interface{}</code> type if it can have values of different types. That is why, to have an array of items of different types, you need to create an array of interfaces: <code>[]interface{}</code>. Consequently, the bounding box is an array of interfaces and the array of bounding boxes is an array of interface arrays: <code>[][]interface{}</code>.</p>
<p>Then there are stubs of <code>prepare_input</code>, <code>run_model</code> and <code>process_output</code> functions defined. In the next sections, we will implement them one by one.</p>
<p><a href=""></a></p>
<h2 id="prepare-the-input-5"><a class="header" href="#prepare-the-input-5"><a href="#prepare-the-input"></a>Prepare the input</a></h2>
<p>To prepare the input for the YOLOv8 model, you need to load the image, resize it and convert it to a tensor of (3,640,640) shape, where the first item is an array of red components of image pixels, the second item is an array of greens and the last item is an array of blues. Furthermore, the ONNX library for Go requires you to provide this tensor as a flat array, i.e. to concatenate these three arrays one after another, as displayed in the next image.</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--qXVbzQSo--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/sj6udnkxhmu054a1at57.png"><img src="https://gitcode.net/dnrops/blog_images/-/raw/main/all_imgs/5909d8d2325f43c9a6416fe9f0f8c87b~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.png#??w=333&amp;h=130&amp;s=1732&amp;e=png&amp;b=ffffff" alt="Image description" /></a></p>
<p>So, let’s load and resize the image first:</p>
<pre><code>func prepare_input(buf io.Reader) ([]float32, int64, int64) {
    img, _, _ := image.Decode(buf)
    size := img.Bounds().Size()
    img_width, img_height := int64(size.X), int64(size.Y)
    img = resize.Resize(640, 640, img, resize.Lanczos3)
</code></pre>
<p>This code:</p>
<ul>
<li>loaded the image,</li>
<li>saved the size of original image to <code>img_width</code>, <code>img_height</code> variables</li>
<li>resized it to 640x640 pixels</li>
</ul>
<p>Then you need to collect the colors of pixels to different arrays, that you should define first:</p>
<pre><code>    red := []float32{}
    green := []float32{}
    blue := []float32{}
</code></pre>
<p>Then you need to extract pixels and their colors from the image. To do that, the <code>img</code> object has <code>.At(x,y)</code> method, that can be used to get the pixel object at a specified point of the image. The color object, returned by this method has an <code>.RGBA()</code> method, that returns the color components as an array of 4 elements: [R,G,B,A]. You need to extract only R,G,B and scale them.</p>
<p>Now, you have everything to traverse the image and collect pixel colors to created arrays:</p>
<pre><code>for y := 0; y &lt; 640; y++ {
    for x := 0; x &lt; 640; x++ {
        r, g, b, _ := img.At(x, y).RGBA()
        red = append(red, float32(r/257)/255.0)
        green = append(green, float32(g/257)/255.0)
        blue = append(blue, float32(b/257)/255.0)
    }
}
</code></pre>
<ul>
<li>This code traverses all rows and columns of image.</li>
<li>It extracts array of color components of each pixel and destructures them to <code>r</code>, <code>g</code> and <code>b</code> variables.</li>
<li>Then it scales these components and appends them to appropriate arrays.</li>
</ul>
<p>Finally, you need to concatenate these arrays to a single one in correct order:</p>
<pre><code>input := append(red, green...)
input = append(input, blue...)
</code></pre>
<p>So, the <code>input</code> variable contains the input required for the ONNX runtime. Here is the full code of this function, which returns the <code>input</code> and the size of the original image that will be used later when processing the output from the model.</p>
<pre><code>func prepare_input(buf io.Reader) ([]float32, int64, int64) {
    img, _, _ := image.Decode(buf)
    size := img.Bounds().Size()
    img_width, img_height := int64(size.X), int64(size.Y)
    img = resize.Resize(640, 640, img, resize.Lanczos3)
    red := []float32{}
    green := []float32{}
    blue := []float32{}
    for y := 0; y &lt; 640; y++ {
        for x := 0; x &lt; 640; x++ {
            r, g, b, _ := img.At(x, y).RGBA()
            red = append(red, float32(r/257)/255.0)
            green = append(green, float32(g/257)/255.0)
            blue = append(blue, float32(b/257)/255.0)
        }
    }
    input := append(red, green...)
    input = append(input, blue...)
    return input, img_width, img_height
}
</code></pre>
<p>Now, let’s run it through the model.</p>
<p><a href=""></a></p>
<h2 id="run-the-model-4"><a class="header" href="#run-the-model-4"><a href="#run-the-model"></a>Run the model</a></h2>
<p>The <code>run_model</code> does the same as in <a href="#run_the_model_python">Python example</a>, but it is quite wordy, because of Go language specifics:</p>
<pre><code>func run_model(input []float32) []float32 {
    ort.SetSharedLibraryPath(&quot;./libonnxruntime.so&quot;)
    _ = ort.InitializeEnvironment()

    inputShape := ort.NewShape(1, 3, 640, 640)
    inputTensor, _ := ort.NewTensor(inputShape, input)

    outputShape := ort.NewShape(1, 84, 8400)
    outputTensor, _ := ort.NewEmptyTensor[float32](outputShape)

    model, _ := ort.NewSession[float32](&quot;./yolov8m.onnx&quot;,
        []string{&quot;images&quot;}, []string{&quot;output0&quot;},
        []*ort.Tensor[float32]{inputTensor},[]*ort.Tensor[float32]{outputTensor})

    _ = model.Run()
    return outputTensor.GetData()
}
</code></pre>
<ul>
<li>As written in the <a href="#setup_the_project_go">setup section</a>, the Go ONNX library needs to know where the ONNX runtime library is located. You need to use the <code>ort.SetSharedLibraryPath()</code> to specify the location of the main file of the ONNX runtime library and initialize the environment with this library. If you downloaded it manually, as suggested earlier, then just specify the name of the file. For Linux, the file name will be <code>libonnxruntime.so</code>, for macOS - <code>libonnxruntime.dylib</code>, for Windows - <code>onnxruntime.dll</code>. I work on Linux, so in this example I use the Linux library.</li>
<li>Then, the library requires converting the <code>input</code> to internal tensor format with (1,3,640,640) shape.</li>
<li>Then, the library also requires creating an empty structure for output tensor, and specify its shape. The Go ONNX library does not return the output, but it writes it to the variable, that defined in advance. Here, we defined the <code>outputTensor</code> variable as a tensor with (1,84,8400) shape that will be used to receive the data from the model.</li>
<li>Then we create a model using the <code>NewSession</code> function, which receives both arrays of input and output names and arrays of input and output tensors.</li>
<li>Then we run this model, that processes input and writes the output to the <code>outputTensor</code> variable.</li>
<li>The <code>outputTensor.GetData()</code> method returns the output data as a flat array of float numbers.</li>
</ul>
<p>As a result, the function returns the array with (1,84,8400) shape, or you can think about this as an 84x8400 matrix. However, it returns the output as a single-dimension array. The numbers in this array are ordered as 84x8400, but as a flat array of 705600 items. So, you can’t transpose it, and you can’t traverse it by rows in a loop, because it’s required to specify the absolute position of each item. But do not worry, in the next section we will learn how to deal with it.</p>
<p><a href=""></a></p>
<h2 id="process-the-output-4"><a class="header" href="#process-the-output-4"><a href="#process-the-output"></a>Process the output</a></h2>
<p>The code of the <code>process_output</code> function will use the Intersection Over Union algorithm to filter out all overlapped boxes. It’s easy to rewrite the <a href="#process_the_output_python">iou, intersect and union functions</a> from Python to Go. Include them to your code below the <code>process_output</code> function:</p>
<pre><code>func iou(box1, box2 []interface{}) float64 {
    return intersection(box1, box2) / union(box1, box2)
}

func union(box1, box2 []interface{}) float64 {
    box1_x1, box1_y1, box1_x2, box1_y2 := box1[0].(float64), box1[1].(float64), box1[2].(float64), box1[3].(float64)
    box2_x1, box2_y1, box2_x2, box2_y2 := box2[0].(float64), box2[1].(float64), box2[2].(float64), box2[3].(float64)
    box1_area := (box1_x2 - box1_x1) * (box1_y2 - box1_y1)
    box2_area := (box2_x2 - box2_x1) * (box2_y2 - box2_y1)
    return box1_area + box2_area - intersection(box1, box2)
}

func intersection(box1, box2 []interface{}) float64 {
    box1_x1, box1_y1, box1_x2, box1_y2 := box1[0].(float64), box1[1].(float64), box1[2].(float64), box1[3].(float64)
    box2_x1, box2_y1, box2_x2, box2_y2 := box2[0].(float64), box2[1].(float64), box2[2].(float64), box2[3].(float64)
    x1 := math.Max(box1_x1, box2_x1)
    y1 := math.Max(box1_y1, box2_y1)
    x2 := math.Min(box1_x2, box2_x2)
    y2 := math.Min(box1_y2, box2_y2)
    return (x2 - x1) * (y2 - y1)
}
</code></pre>
<p>also, you will need to find YOLO class label by ID, so add the <code>yolo_classes</code> array to your code:</p>
<pre><code>var yolo_classes = []string{
    &quot;person&quot;, &quot;bicycle&quot;, &quot;car&quot;, &quot;motorcycle&quot;, &quot;airplane&quot;, &quot;bus&quot;, &quot;train&quot;, &quot;truck&quot;, &quot;boat&quot;,
    &quot;traffic light&quot;, &quot;fire hydrant&quot;, &quot;stop sign&quot;, &quot;parking meter&quot;, &quot;bench&quot;, &quot;bird&quot;, &quot;cat&quot;, &quot;dog&quot;, &quot;horse&quot;,
    &quot;sheep&quot;, &quot;cow&quot;, &quot;elephant&quot;, &quot;bear&quot;, &quot;zebra&quot;, &quot;giraffe&quot;, &quot;backpack&quot;, &quot;umbrella&quot;, &quot;handbag&quot;, &quot;tie&quot;,
    &quot;suitcase&quot;, &quot;frisbee&quot;, &quot;skis&quot;, &quot;snowboard&quot;, &quot;sports ball&quot;, &quot;kite&quot;, &quot;baseball bat&quot;, &quot;baseball glove&quot;,
    &quot;skateboard&quot;, &quot;surfboard&quot;, &quot;tennis racket&quot;, &quot;bottle&quot;, &quot;wine glass&quot;, &quot;cup&quot;, &quot;fork&quot;, &quot;knife&quot;, &quot;spoon&quot;,
    &quot;bowl&quot;, &quot;banana&quot;, &quot;apple&quot;, &quot;sandwich&quot;, &quot;orange&quot;, &quot;broccoli&quot;, &quot;carrot&quot;, &quot;hot dog&quot;, &quot;pizza&quot;, &quot;donut&quot;,
    &quot;cake&quot;, &quot;chair&quot;, &quot;couch&quot;, &quot;potted plant&quot;, &quot;bed&quot;, &quot;dining table&quot;, &quot;toilet&quot;, &quot;tv&quot;, &quot;laptop&quot;, &quot;mouse&quot;,
    &quot;remote&quot;, &quot;keyboard&quot;, &quot;cell phone&quot;, &quot;microwave&quot;, &quot;oven&quot;, &quot;toaster&quot;, &quot;sink&quot;, &quot;refrigerator&quot;, &quot;book&quot;,
    &quot;clock&quot;, &quot;vase&quot;, &quot;scissors&quot;, &quot;teddy bear&quot;, &quot;hair drier&quot;, &quot;toothbrush&quot;,
}
</code></pre>
<p>Now let’s implement the <code>process_output</code> function. As mentioned above, the function receives the output as a flat array that is ordered as an 84x8400 matrix. When working in Python, we had NumPy to transform it to 8400x84 and then traverse it in a loop by rows. Here, we can’t transform it this way, so we need to traverse it by columns.</p>
<pre><code>boxes := [][]interface{}{}
for index := 0; index &lt; 8400; index++ {

}
</code></pre>
<p>Moreover, you do not have row indexes and column indexes, but have only absolute indexes. You can only virtually reshape this flat array to 84x8400 matrix in your head and use this representation to calculate these absolute indexes, using those “virtual rows” and “virtual columns”.</p>
<p>Let’s display how the <code>output</code> array looks to clarify this:</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--8sLV1qyG--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/62d9ubppk0a1m13bpq47.png"><img src="https://res.cloudinary.com/practicaldev/image/fetch/s--8sLV1qyG--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/62d9ubppk0a1m13bpq47.png" alt="Image description" /></a></p>
<p>Here we virtually reshaped the <code>output</code> array with 705600 items to an 84x8400 matrix. It has 8400 columns with indexes from 0 to 8399 and 84 rows with indexes from 0 to 83. The absolute indexes of items are written inside the boxes. Each detected object is represented by a column in this matrix. The first 4 rows of each column with indexes from 0 to 3 are coordinates of the bounding box of the appropriate object: x_center, y_center, width and height. Cells in the other 80 rows, from 4 to 83, contain the probabilities that the object belongs to each of the 80 YOLO classes.</p>
<p>I drew this table to understand how to calculate the absolute index of any item in it, knowing the row and column indexes. For example, how do you calculate the index of the first greyed item that stands on row 2 and column 2, which is a bounding box <code>width</code> of the third detected object? If you think about this a little more, you will find that to calculate this you need to multiply the row index by the length of the row (8400) and add the column index to this. Let’s check it: 8400*2+2=16802. Now, let’s calculate the index of the item below it, which is a height of the same object: 8400*3+2=25202. Bingo! Matched again! Finally, let’s check the bottom gray box, which is a probability that object 8399 belongs to class 79 (toothbrush): 8400*83+8398=705598. Great, so you have a formula to calculate absolute index: <code>8400*row_index+column_index</code>.</p>
<p>Let’s return to our empty loop. Assuming that the <code>index</code> loop counter is an index of current column and that coordinates of bounding box located in rows 0-3 of current column, we can extract them this way:</p>
<pre><code>boxes := [][]interface{}{}
for index := 0; index &lt; 8400; index++ {
    xc := output[index]
    yc := output[8400+index]
    w := output[2*8400+index]
    h := output[3*8400+index]
}
</code></pre>
<p>Then you can calculate the corners of the bounding box and scale them to the size of the original image:</p>
<pre><code>    x1 := (xc - w/2) / 640 * float32(img_width)
    y1 := (yc - h/2) / 640 * float32(img_height)
    x2 := (xc + w/2) / 640 * float32(img_width)
    y2 := (yc + h/2) / 640 * float32(img_height)
</code></pre>
<p>Now, similarly, you need to get the probabilities of the object, which go in rows from 4 to 83, find which of them is the biggest along with the index of this probability, and save these values to the <code>prob</code> and <code>class_id</code> variables. You can write a nested loop that traverses rows from 4 to 83 and saves the highest value and its index:</p>
<pre><code>class_id, prob := 0, float32(0.0)
for col := 0; col &lt; 80; col++ {
    if output[8400*(col+4)+index] &gt; prob {
        prob = output[8400*(col+4)+index]
        class_id = col
    }
}
</code></pre>
<p>Then, having the maximum probability and class_id, you can either skip that object, if the probability is less than 0.5 or find the label of this class.</p>
<p>Here is a final code, that processes and collects bounding boxes to the <code>boxes</code> array:</p>
<pre><code>boxes := [][]interface{}{}
for index := 0; index &lt; 8400; index++ {
    class_id, prob := 0, float32(0.0)
    for col := 0; col &lt; 80; col++ {
        if output[8400*(col+4)+index] &gt; prob {
            prob = output[8400*(col+4)+index]
            class_id = col
        }
    }
    if prob &lt; 0.5 {
        continue
    }
    label := yolo_classes[class_id]
    xc := output[index]
    yc := output[8400+index]
    w := output[2*8400+index]
    h := output[3*8400+index]
    x1 := (xc - w/2) / 640 * float32(img_width)
    y1 := (yc - h/2) / 640 * float32(img_height)
    x2 := (xc + w/2) / 640 * float32(img_width)
    y2 := (yc + h/2) / 640 * float32(img_height)
    boxes = append(boxes, []interface{}{float64(x1), float64(y1), float64(x2), float64(y2), label, prob})
}
</code></pre>
<p>The last step is to filter the <code>boxes</code> array using “Non-maximum suppression”, to exclude all overlapping boxes from it. This code does the same as the <a href="#process_the_output_python">Python implementation</a>, but looks slightly different because of the Go language specifics:</p>
<pre><code>sort.Slice(boxes, func(i, j int) bool {
    return boxes[i][5].(float32) &gt; boxes[j][5].(float32)
})
result := [][]interface{}{}
for len(boxes) &gt; 0 {
    result = append(result, boxes[0])
    tmp := [][]interface{}{}
    for _, box := range boxes {
        if iou(boxes[0], box) &lt; 0.7 {
            tmp = append(tmp, box)
        }
    }
    boxes = tmp
}
</code></pre>
<ul>
<li>First we sort the boxes by probability in reverse order to put the boxes with the highest probability to the top</li>
<li>In a loop, we put the box with the highest probability to the <code>result</code> array</li>
<li>Then we create a temporary <code>tmp</code> array and in the inner loop over all boxes, we put to this array only boxes, that do not overlap selected too much (that have IoU&lt;0.7).</li>
<li>Then we overwrite the <code>boxes</code> array with the <code>tmp</code> array. This way, we filter out all overlapping boxes from the <code>boxes</code> array.</li>
<li>If some boxes exist after filtering, the loop continues going until the <code>boxes</code> array becomes empty.</li>
</ul>
<p>Finally, the <code>result</code> variable contains all bounding boxes that should be returned.</p>
<p>That’s all! For convenience, here is a full code of the <code>process_output</code> function:</p>
<pre><code>func process_output(output []float32, img_width, img_height int64) [][]interface{} {
    boxes := [][]interface{}{}
    for index := 0; index &lt; 8400; index++ {
        class_id, prob := 0, float32(0.0)
        for col := 0; col &lt; 80; col++ {
            if output[8400*(col+4)+index] &gt; prob {
                prob = output[8400*(col+4)+index]
                class_id = col
            }
        }
        if prob &lt; 0.5 {
            continue
        }
        label := yolo_classes[class_id]
        xc := output[index]
        yc := output[8400+index]
        w := output[2*8400+index]
        h := output[3*8400+index]
        x1 := (xc - w/2) / 640 * float32(img_width)
        y1 := (yc - h/2) / 640 * float32(img_height)
        x2 := (xc + w/2) / 640 * float32(img_width)
        y2 := (yc + h/2) / 640 * float32(img_height)
        boxes = append(boxes, []interface{}{float64(x1), float64(y1), float64(x2), float64(y2), label, prob})
    }

    sort.Slice(boxes, func(i, j int) bool {
        return boxes[i][5].(float32) &gt; boxes[j][5].(float32)
    })
    result := [][]interface{}{}
    for len(boxes) &gt; 0 {
        result = append(result, boxes[0])
        tmp := [][]interface{}{}
        for _, box := range boxes {
            if iou(boxes[0], box) &lt; 0.7 {
                tmp = append(tmp, box)
            }
        }
        boxes = tmp
    }
    return result
}
</code></pre>
<p>If you like to work with this output in a more convenient “Pythonic” way, there is a <a href="https://github.com/gorgonia/tensor">Gorgonia Tensor</a> library that emulates features of NumPy in Go. You can use it to physically reshape the output to 84x8400, then transpose to 8400x84 and then traverse detected objects by row.</p>
<p>However, the option to work with single dimension array as with matrix described in this section is the most efficient, because we got all values we need without additional array transformations. I think that installing additional external dependency is overkill for this case.</p>
<p>That is it for Go implementation. If you wrote everything correctly, then you can start this web service by running the following command:</p>
<pre><code>go run main.go
</code></pre>
<p>and open <code>http://localhost:8080</code> in a web browser.</p>
<p>The code that we developed here is intended only to demonstrate how to load and run the YOLOv8 models using ONNX runtime. I made it as simple as possible, and it does not include any details, except working with ONNX. It does not include any resource management, error processing and exception handling. These tasks depend on real use cases and it’s up to you how to implement them for your projects.</p>
<p>The full reference of the Go library for ONNX runtime is available <a href="https://pkg.go.dev/github.com/yalue/onnxruntime_go">here</a>.</p>
<p>You can find a source code of Go object detector web service in <a href="https://github.com/AndreyGermanov/yolov8_onnx_go">this repository</a>.</p>
<p><a href=""></a></p>
<h1 id="create-a-web-service-on-rust"><a class="header" href="#create-a-web-service-on-rust"><a href="#create-a-web-service-on-rust"></a>Create a web service on Rust</a></h1>
<p>This article cannot be complete without an example of a low-level language — a high-performance and efficient language in which developers manage memory themselves and do not rely on a garbage collector. I was thinking about which one to choose, either C++ or Rust. Finally, I decided to ask people and created the following poll in the LinkedIn group:</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--45f3xg8m--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/2zspbv1pjbukyrqd7ie2.png"><img src="https://gitcode.net/dnrops/blog_images/-/raw/main/all_imgs/6fa64a91388244dba25e21a6b92bf0db~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.png#??w=506&amp;h=380&amp;s=14498&amp;e=png&amp;b=fdfdfd" alt="Image description" /></a></p>
<p>Regardless of received results, I also analyzed comments and understood that highly likely people answered not the question that I have asked. I did not ask “Which of these programming languages do you know?”, or “Which of them do you like?” or “Which of them is the most popular?”. Instead, I asked: “Which is better to learn TODAY to create NEW high performance server applications?”.</p>
<p>Finally, I got only one valuable comment:</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--8ewofivV--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/sq4k8zt2e3456z0akl2t.png"><img src="https://gitcode.net/dnrops/blog_images/-/raw/main/all_imgs/99b3f0a44c5f4b72bbd3f45ea4090034~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.png#??w=494&amp;h=277&amp;s=11672&amp;e=png&amp;b=f4f4f4" alt="Image description" /></a></p>
<p>It was the only comment that received some likes and I completely agree with that text.</p>
<p>Finally, the choice was made! We are going to create an object detection web service on Rust - the safest low-level programming language today.</p>
<p><a href=""></a></p>
<h2 id="setup-the-project-5"><a class="header" href="#setup-the-project-5"><a href="#setup-the-project"></a>Setup the project</a></h2>
<p>Enter the command to create a new Rust project:</p>
<pre><code>cargo new object_detector
</code></pre>
<p>This will create an <code>object_detector</code> folder with a project template in it.</p>
<p>Go to this folder and open the <code>Cargo.toml</code> file in it.</p>
<p>Write the following packages to the <code>dependencies</code> section:</p>
<pre><code>[dependencies]
image = &quot;0.24.6&quot;
ndarray = &quot;0.15.6&quot;
ort = &quot;1.14.6&quot;
serde = &quot;1.0.84&quot;
serde_derive = &quot;1.0.84&quot;
serde_json = &quot;1.0.36&quot;
rocket = &quot;=0.5.0-rc.3&quot;
</code></pre>
<ul>
<li><a href="https://docs.rs/image/latest/image/">image</a> - library for image processing.</li>
<li><a href="https://docs.rs/ndarray/latest/ndarray/">ndarray</a> - multidimensional array support library.</li>
<li><a href="https://github.com/pykeio/ort">ort</a> - ONNX runtime library.</li>
<li><a href="https://serde.rs/">serde,serde_derive,serde_json</a> - Serialization library to serialize data to JSON.</li>
<li><a href="https://rocket.rs/">rocket</a> - Web framework.</li>
</ul>
<p>Create a <code>Rocket.toml</code> file which will contain configuration for the Rocket web server and add the following lines to it:</p>
<pre><code>[global]
address = &quot;0.0.0.0&quot;
port = 8080
</code></pre>
<p>We are not going to change frontend, so copy the <code>index.html</code> to the project. Also, copy the <code>yolov8m.onnx</code> model.</p>
<p>Before continuing, ensure that the ONNX runtime is installed on your operating system, because the library that is integrated into the Rust package may not work correctly. To install it, you can download the archive for your operating system from <a href="https://github.com/microsoft/onnxruntime/releases">here</a>, extract it and copy the contents of the “lib” subfolder to the system libraries path of your operating system.</p>
<p>The <code>main.rs</code>, the main project file already generated, and it’s located in the <code>src</code> subfolder. Open this file and add the following boilerplate code to it:</p>
<pre><code>use std::{sync::Arc, path::Path, vec};
use image::{GenericImageView, imageops::FilterType};
use ndarray::{Array, IxDyn, s, Axis};
use ort::{Environment,SessionBuilder,tensor::InputTensor};
use rocket::{response::content,fs::TempFile,form::Form};
#[macro_use] extern crate rocket;

#[rocket::main]
async fn main() {
    rocket::build()
        .mount(&quot;/&quot;, routes![index])
        .mount(&quot;/detect&quot;, routes![detect])
        .launch().await.unwrap();
}

#[get(&quot;/&quot;)]
fn index() -&gt; content::RawHtml&lt;String&gt; {
    return content::RawHtml(std::fs::read_to_string(&quot;index.html&quot;).unwrap());
}

#[post(&quot;/&quot;, data = &quot;&lt;file&gt;&quot;)]
fn detect(file: Form&lt;TempFile&lt;'_&gt;&gt;) -&gt; String {
    let buf = std::fs::read(file.path().unwrap_or(Path::new(&quot;&quot;))).unwrap_or(vec![]);
    let boxes = detect_objects_on_image(buf);
    return serde_json::to_string(&amp;boxes).unwrap_or_default()
}

fn detect_objects_on_image(buf: Vec&lt;u8&gt;) -&gt; Vec&lt;(f32,f32,f32,f32,&amp;'static str,f32)&gt; {
    let (input,img_width,img_height) = prepare_input(buf);
    let output = run_model(input);
    return process_output(output, img_width, img_height);    
}

fn prepare_input(buf: Vec&lt;u8&gt;) -&gt; (Array&lt;f32,IxDyn&gt;, u32, u32) {

}

fn run_model(input:Array&lt;f32,IxDyn&gt;) -&gt; Array&lt;f32,IxDyn&gt; {

}

fn process_output(output:Array&lt;f32,IxDyn&gt;,img_width: u32, img_height: u32) -&gt; Vec&lt;(f32,f32,f32,f32,&amp;'static str, f32)&gt; {

}
</code></pre>
<p>First block imports required modules:</p>
<ul>
<li><code>image</code> - to process images</li>
<li><code>ndarray</code> - to work with tensors</li>
<li><code>ort</code> - ONNX runtime library</li>
<li><code>rocket</code> - Rocket Web framework</li>
<li><code>std</code> - some objects from Rust standard library</li>
</ul>
<p>Then, in the main function we start the <code>Rocket</code> web server and attach <code>index</code> and <code>detect</code> routes to it.</p>
<p>The <code>index</code> function serves the root of the service, it just returns the content of the <code>index.html</code> file as HTML.</p>
<p>The <code>detect</code> function serves the <code>/detect</code> endpoint. It receives the uploaded file, passes it to the <code>detect_objects_on_image</code>, receives the array of bounding boxes, serializes them to JSON and returns this JSON string to the frontend.</p>
<p>The <code>detect_objects_on_image</code> implements the same actions as the <a href="#setup_the_project_python">Python version</a>. It converts the image to the multidimensional array of numbers, passes it to the ONNX runtime and processes the output. Finally, it returns the array of bounding boxes, where each bounding box is a tuple of (x1,y1,x2,y2,label,prob). Rust is a strongly typed language, so we have to specify the types of all variables in this tuple. That is why it returns <code>Vec&lt;(f32,f32,f32,f32,&amp;'static str,f32)&gt;</code> which is a vector of bounding box tuples.</p>
<p>Then we define stubs for <code>prepare_input</code>, <code>run_model</code> and <code>process_output</code> functions, that will be implemented one by one in the following sections.</p>
<p><a href=""></a></p>
<h2 id="prepare-the-input-6"><a class="header" href="#prepare-the-input-6"><a href="#prepare-the-input"></a>Prepare the input</a></h2>
<p>To prepare the input for the YOLOv8 model, you need to load the image, resize it and convert to a tensor of (1,3,640,640) shape which is an array of single image represented as 3 640x640 matrices. The first item is an array of red components of image pixels, the second item is an array of greens, and the last item is an array of blues. We will use the <code>ndarray</code> library to construct this tensor and fill it with pixel color values. But first we need to load the image, and resize it to 640x640:</p>
<pre><code>let img = image::load_from_memory(&amp;buf).unwrap();
let (img_width, img_height) = (img.width(), img.height());
let img = img.resize_exact(640, 640, FilterType::CatmullRom);
</code></pre>
<ul>
<li>In the first line, the image is loaded from uploaded file buffer</li>
<li>Next, we save the original image width and height for future use</li>
<li>Finally, we resized the image to 640x640</li>
</ul>
<p>Then, let’s construct the input array of required shape:</p>
<pre><code>let mut input = Array::zeros((1, 3, 640, 640)).into_dyn();
</code></pre>
<p>This line created a new 4-dimensional tensor filled with zeros.</p>
<p>Now, you need to get access to the image pixels and their color components. The <code>img</code> object has a <code>pixels()</code> method, which is an iterator for image pixels. You can use it to get access to each pixel in a loop:</p>
<pre><code>for pixel in img.pixels() {
}
</code></pre>
<p>The <code>pixel</code> is a <code>Pixel</code> object with properties that we need:</p>
<ul>
<li><code>x</code> - the x coordinate of pixel</li>
<li><code>y</code> - the y coordinate of pixel</li>
<li><code>color</code> - the object with an array with 4 items [r,g,b,a]: color components of pixel.</li>
</ul>
<p>Having this, you can fill the tensor input in a loop:</p>
<pre><code>for pixel in img.pixels() {
    let x = pixel.0 as usize;
    let y = pixel.1 as usize;
    let [r,g,b,_] = pixel.2.0;
    input[[0, 0, y, x]] = (r as f32) / 255.0;
    input[[0, 1, y, x]] = (g as f32) / 255.0;
    input[[0, 2, y, x]] = (b as f32) / 255.0;
};
</code></pre>
<ul>
<li>First, we extract <code>x</code> and <code>y</code> variables and convert them to the type that can be used as a tensor index</li>
<li>Then we destructure color to <code>r</code>, <code>g</code> and <code>b</code> variables.</li>
<li>Finally, we put these pixel color components to appropriate cells of the tensor. Notice that the <code>y</code> goes first and the <code>x</code> goes next. This is because in matrices, the first dimension is a row and the second is a column.</li>
</ul>
<p>So, now you have an input prepared for the neural network. You need to return it from the function along with <code>img_width</code> and <code>img_height</code>. Here is a full source of the <code>prepare_input</code>:</p>
<pre><code>fn prepare_input(buf: Vec&lt;u8&gt;) -&gt; (Array&lt;f32,IxDyn&gt;, u32, u32) {
    let img = image::load_from_memory(&amp;buf).unwrap();
    let (img_width, img_height) = (img.width(), img.height());
    let img = img.resize_exact(640, 640, FilterType::CatmullRom);
    let mut input = Array::zeros((1, 3, 640, 640)).into_dyn();
    for pixel in img.pixels() {
        let x = pixel.0 as usize;
        let y = pixel.1 as usize;
        let [r,g,b,_] = pixel.2.0;
        input[[0, 0, y, x]] = (r as f32) / 255.0;
        input[[0, 1, y, x]] = (g as f32) / 255.0;
        input[[0, 2, y, x]] = (b as f32) / 255.0;
    };
    return (input, img_width, img_height);
}
</code></pre>
<p>Now, it’s time to pass this input through the YOLOv8 model.</p>
<p><a href=""></a></p>
<h2 id="run-the-model-5"><a class="header" href="#run-the-model-5"><a href="#run-the-model"></a>Run the model</a></h2>
<p>The <code>run_model</code> function used to pass the input tensor through the model and return the output tensor. This is its source code:</p>
<pre><code>fn run_model(input:Array&lt;f32,IxDyn&gt;) -&gt; Array&lt;f32,IxDyn&gt; {
    let input = InputTensor::FloatTensor(input);
    let env = Arc::new(Environment::builder().with_name(&quot;YOLOv8&quot;).build().unwrap());
    let model = SessionBuilder::new(&amp;env).unwrap().with_model_from_file(&quot;yolov8m.onnx&quot;).unwrap();
    let outputs = model.run([input]).unwrap();
    let output = outputs.get(0).unwrap().try_extract::&lt;f32&gt;().unwrap().view().t().into_owned();
    return output;
}
</code></pre>
<ul>
<li>First it converts the input to the internal ONNX runtime tensor format</li>
<li>Then it creates the <code>env</code>ironment and instantiates the ONNX <code>model</code> in it from the <code>yolov8m.onnx</code> file.</li>
<li>Then it runs the model with the <code>input</code> tensor and receives the array of outputs.</li>
<li>Finally, it extracts the first <code>output</code> and returns it.</li>
</ul>
<p>The returned output is an <code>Ndarray</code> tensor, so we can traverse it in a loop. Let’s process it.</p>
<p><a href=""></a></p>
<h2 id="process-the-output-5"><a class="header" href="#process-the-output-5"><a href="#process-the-output"></a>Process the output</a></h2>
<p>The code of the <code>process_output</code> function will use the Intersection Over Union algorithm to filter out all overlapped boxes. It’s easy to rewrite the <a href="#process_the_output_python">iou, intersect and union functions</a> from Python to Rust. Include them to your code below the <code>process_output</code> function:</p>
<pre><code>fn iou(box1: &amp;(f32, f32, f32, f32, &amp;'static str, f32), box2: &amp;(f32, f32, f32, f32, &amp;'static str, f32)) -&gt; f32 {
    return intersection(box1, box2) / union(box1, box2);
}

fn union(box1: &amp;(f32, f32, f32, f32, &amp;'static str, f32), box2: &amp;(f32, f32, f32, f32, &amp;'static str, f32)) -&gt; f32 {
    let (box1_x1,box1_y1,box1_x2,box1_y2,_,_) = *box1;
    let (box2_x1,box2_y1,box2_x2,box2_y2,_,_) = *box2;
    let box1_area = (box1_x2-box1_x1)*(box1_y2-box1_y1);
    let box2_area = (box2_x2-box2_x1)*(box2_y2-box2_y1);
    return box1_area + box2_area - intersection(box1, box2);
}

fn intersection(box1: &amp;(f32, f32, f32, f32, &amp;'static str, f32), box2: &amp;(f32, f32, f32, f32, &amp;'static str, f32)) -&gt; f32 {
    let (box1_x1,box1_y1,box1_x2,box1_y2,_,_) = *box1;
    let (box2_x1,box2_y1,box2_x2,box2_y2,_,_) = *box2;
    let x1 = box1_x1.max(box2_x1);
    let y1 = box1_y1.max(box2_y1);
    let x2 = box1_x2.min(box2_x2);
    let y2 = box1_y2.min(box2_y2);
    return (x2-x1)*(y2-y1);
}
</code></pre>
<p>Also, we will need to get labels for detected objects, so include this array of COCO class labels:</p>
<pre><code>const YOLO_CLASSES:[&amp;str;80] = [
    &quot;person&quot;, &quot;bicycle&quot;, &quot;car&quot;, &quot;motorcycle&quot;, &quot;airplane&quot;, &quot;bus&quot;, &quot;train&quot;, &quot;truck&quot;, &quot;boat&quot;,
    &quot;traffic light&quot;, &quot;fire hydrant&quot;, &quot;stop sign&quot;, &quot;parking meter&quot;, &quot;bench&quot;, &quot;bird&quot;, &quot;cat&quot;, &quot;dog&quot;, &quot;horse&quot;,
    &quot;sheep&quot;, &quot;cow&quot;, &quot;elephant&quot;, &quot;bear&quot;, &quot;zebra&quot;, &quot;giraffe&quot;, &quot;backpack&quot;, &quot;umbrella&quot;, &quot;handbag&quot;, &quot;tie&quot;,
    &quot;suitcase&quot;, &quot;frisbee&quot;, &quot;skis&quot;, &quot;snowboard&quot;, &quot;sports ball&quot;, &quot;kite&quot;, &quot;baseball bat&quot;, &quot;baseball glove&quot;,
    &quot;skateboard&quot;, &quot;surfboard&quot;, &quot;tennis racket&quot;, &quot;bottle&quot;, &quot;wine glass&quot;, &quot;cup&quot;, &quot;fork&quot;, &quot;knife&quot;, &quot;spoon&quot;,
    &quot;bowl&quot;, &quot;banana&quot;, &quot;apple&quot;, &quot;sandwich&quot;, &quot;orange&quot;, &quot;broccoli&quot;, &quot;carrot&quot;, &quot;hot dog&quot;, &quot;pizza&quot;, &quot;donut&quot;,
    &quot;cake&quot;, &quot;chair&quot;, &quot;couch&quot;, &quot;potted plant&quot;, &quot;bed&quot;, &quot;dining table&quot;, &quot;toilet&quot;, &quot;tv&quot;, &quot;laptop&quot;, &quot;mouse&quot;,
    &quot;remote&quot;, &quot;keyboard&quot;, &quot;cell phone&quot;, &quot;microwave&quot;, &quot;oven&quot;, &quot;toaster&quot;, &quot;sink&quot;, &quot;refrigerator&quot;, &quot;book&quot;,
    &quot;clock&quot;, &quot;vase&quot;, &quot;scissors&quot;, &quot;teddy bear&quot;, &quot;hair drier&quot;, &quot;toothbrush&quot;
];
</code></pre>
<p>Now let’s start writing the <code>process_output</code> function.</p>
<p>Let’s define an array to which you will put collected bounding boxes:</p>
<pre><code>let mut boxes = Vec::new();
</code></pre>
<p>The <code>output</code> from the YOLOv8 model is a tensor with a shape of [8400,84,1], instead of how it looks in other programming languages, because we transposed it with <code>.t()</code> in the <code>run_model</code> function. It’s already ordered by rows, but has an extra dimension at the end. Let’s remove it:</p>
<pre><code>let output = output.slice(s![..,..,0])
</code></pre>
<p>This line extracted the (8400,84) matrix from this tensor, and we can traverse it by first axis, e.g. by rows:</p>
<pre><code>for row in output.axis_iter(Axis(0)) {
}
</code></pre>
<p>Here, the <code>row</code> is a single dimension <code>NdArray</code> object that represents a row with 84 float numbers. It will be more convenient to convert it to the basic array, let’s do it:</p>
<pre><code>for row in output.axis_iter(Axis(0)) {
    let row:Vec&lt;_&gt; = row.iter().map(|x| *x).collect();
}
</code></pre>
<p>The first 4 items of this array contain bounding box coordinates, and we can convert and scale them to x1,y1,x2,y2 now:</p>
<pre><code>let xc = row[0]/640.0*(img_width as f32);
let yc = row[1]/640.0*(img_height as f32);
let w = row[2]/640.0*(img_width as f32);
let h = row[3]/640.0*(img_height as f32);
let x1 = xc - w/2.0;
let x2 = xc + w/2.0;
let y1 = yc - h/2.0;
let y2 = yc + h/2.0;
</code></pre>
<p>Then, all items from 4 to 83 are probabilities that this bounding box belongs to each of 80 object classes. You need to find maximum of these items and the index of this item, which can be used as an ID of object class. You can do this in a loop:</p>
<pre><code>let mut class_id = 0;
let mut prob:f32 = 0.0;
for index in 4..row.len() {
    if row[index]&gt;prob {
        prob = row[index];
        class_id = index-4;
    }
}
let label = YOLO_CLASSES[class_id];
</code></pre>
<p>Here we determined the maximum probability, the class_id of object with maximum probability and the <code>label</code> of object of this class.</p>
<p>It works ok, but I’d better implement it in a functional way instead of loop:</p>
<pre><code>let (class_id, prob) = row.iter().skip(4).enumerate()
    .map(|(index,value)| (index,*value))
    .reduce(|accum, row| if row.1&gt;accum.1 { row } else {accum}).unwrap();
let label = YOLO_CLASSES[class_id];
</code></pre>
<ul>
<li>This code gets an iterator for row element that starts from 4th item.</li>
<li>Then it maps the row items to a tuples (class_id, prob).</li>
<li>Then it reduces this array of tuples to a single element with maximum <code>prob</code>.</li>
<li>The resulting tuple is then destructured to the <code>class_id</code> and <code>prob</code> variables.</li>
</ul>
<p>Finally, you can skip the row if the <code>prob</code> &lt; 0.5 or collect all values to a bounding box and push this bounding box to the <code>boxes</code> array.</p>
<p>Here is all code that we have now, in which operations ordered correctly:</p>
<pre><code>let mut boxes = Vec::new();
let output = output.slice(s![..,..,0]);
for row in output.axis_iter(Axis(0)) {
    let row:Vec&lt;_&gt; = row.iter().map(|x| *x).collect();
    let (class_id, prob) = row.iter().skip(4).enumerate()
        .map(|(index,value)| (index,**value))
        .reduce(|accum, row| if row.1&gt;accum.1 { row } else {accum}).unwrap();
    if prob &lt; 0.5 {
        continue
    }
    let label = YOLO_CLASSES[class_id];
    let xc = row[0]/640.0*(img_width as f32);
    let yc = row[1]/640.0*(img_height as f32);
    let w = row[2]/640.0*(img_width as f32);
    let h = row[3]/640.0*(img_height as f32);
    let x1 = xc - w/2.0;
    let x2 = xc + w/2.0;
    let y1 = yc - h/2.0;
    let y2 = yc + h/2.0;
    boxes.push((x1,y1,x2,y2,label,prob));
}
</code></pre>
<p>P.S. Actually, it’s possible to implement all this in a functional way instead of a loop. You can do it as a homework.</p>
<p>Finally, you need to filter the <code>boxes</code> array to exclude the boxes, that overlap each other, using the <code>Intersection over union</code>. The filtered boxes should be collected to the <code>result</code> array:</p>
<pre><code>let mut result = Vec::new();
boxes.sort_by(|box1,box2| box2.5.total_cmp(&amp;box1.5));
while boxes.len()&gt;0 {
    result.push(boxes[0]);
    boxes = boxes.iter().filter(|box1| iou(&amp;boxes[0],box1) &lt; 0.7).map(|x| *x).collect()
}
</code></pre>
<ul>
<li>First, we sort <code>boxes</code> by probability in descending order to put the boxes with the highest probability to the top.</li>
<li>Then, in a loop, we put the first box with highest probability to the resulting array</li>
<li>Then, we overwrite the boxes array using a filter that keeps only those boxes whose <code>iou</code> value is less than 0.7 when compared with the selected box.</li>
<li>If, after filtering, <code>boxes</code> still contains elements, the loop continues.</li>
</ul>
<p>Finally, after the loop, the <code>boxes</code> array will be empty and the <code>result</code> will contain bounding boxes of all different detected objects.</p>
<p>The <code>result</code> array should be returned by this function. Here is the whole code:</p>
<pre><code>fn process_output(output:Array&lt;f32,IxDyn&gt;,img_width: u32, img_height: u32) -&gt; Vec&lt;(f32,f32,f32,f32,&amp;'static str, f32)&gt; {
    let mut boxes = Vec::new();
    let output = output.slice(s![..,..,0]);
    for row in output.axis_iter(Axis(0)) {
        let row:Vec&lt;_&gt; = row.iter().map(|x| *x).collect();
        let (class_id, prob) = row.iter().skip(4).enumerate()
            .map(|(index,value)| (index,**value))
            .reduce(|accum, row| if row.1&gt;accum.1 { row } else {accum}).unwrap();
        if prob &lt; 0.5 {
            continue
        }
        let label = YOLO_CLASSES[class_id];
        let xc = row[0]/640.0*(img_width as f32);
        let yc = row[1]/640.0*(img_height as f32);
        let w = row[2]/640.0*(img_width as f32);
        let h = row[3]/640.0*(img_height as f32);
        let x1 = xc - w/2.0;
        let x2 = xc + w/2.0;
        let y1 = yc - h/2.0;
        let y2 = yc + h/2.0;
        boxes.push((x1,y1,x2,y2,label,prob));
    }

    boxes.sort_by(|box1,box2| box2.5.total_cmp(&amp;box1.5));
    let mut result = Vec::new();
    while boxes.len()&gt;0 {
        result.push(boxes[0]);
        boxes = boxes.iter().filter(|box1| iou(&amp;boxes[0],box1) &lt; 0.7).map(|x| *x).collect()
    }
    return result;
}
</code></pre>
<p>That is it for the Rust web service. If everything is written correctly, you can start the web service by running the following command in the project folder:</p>
<pre><code>cargo run
</code></pre>
<p>and open <code>http://localhost:8080</code> in a web browser.</p>
<p>The code that we developed here is oversimplified. It’s intended only to demonstrate how to load and run the YOLOv8 models using ONNX runtime. I made it as simple as possible, and it does not include any other details, except working with ONNX. It does not include any resource management, error processing and exception handling. These tasks depend on real use cases, and it’s up to you how to implement it for your projects.</p>
<p>Full reference of Rust library for ONNX runtime available <a href="https://docs.rs/ort/1.14.6/ort/">here</a>.</p>
<p>You can find a source code of Rust object detector web service in <a href="https://github.com/AndreyGermanov/yolov8_onnx_rust">this repository</a>.</p>
<p><a href=""></a></p>
<h1 id="conclusion"><a class="header" href="#conclusion"><a href="#conclusion"></a>Conclusion</a></h1>
<p>In this article I showed that even if the YOLOv8 neural network created on Python, you can use it from other programming languages, because it can be exported to universal ONNX format.</p>
<p>We explored the foundational algorithms, used to prepare the input and process the output from ONNX model, which is the same for all programming languages that have interfaces for ONNX runtime.</p>
<p>After discovering the main concepts, I showed how to create an object detection web service based on ONNX runtime using Python, Julia, Node.js, JavaScript, Go and Rust. Each language has some differences, but in general, the overall workflow follows the same algorithm.</p>
<p>You can apply this experience to any other neural networks created using PyTorch or TensorFlow (which cover most of the neural networks existing in the world), because each framework can export its models to ONNX.</p>
<p>There are ONNX runtime interfaces for other programming languages like Java, C# or C++ and for other platforms, including mobile phones. You can find the list of official bindings <a href="https://onnxruntime.ai/docs/get-started/with-mobile.html">here</a>.</p>
<p>Also, there are unofficial bindings for other languages, like <a href="https://github.com/ankane/onnxruntime-php">PHP</a>. It’s a great way to integrate neural networks to WordPress websites.</p>
<p>I believe that it won’t be difficult to rewrite the projects that we created here in those other languages — if you know those languages, of course.</p>
<p>In the next article, I will show how to detect objects in a video in a web browser in real time. Follow me to be the first to know when I publish it.</p>
<p>Have fun coding and never stop learning!</p>

                    </main>

                    <nav class="nav-wrapper" aria-label="Page navigation">
                        <!-- Mobile navigation buttons -->
                            <a rel="prev" href="../../posts/rust/rust_binary_include_dir_and_get_contents.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
                                <i class="fa fa-angle-left"></i>
                            </a>

                            <a rel="next" href="../../posts/rust/implment_builder_proc_macro_for_command_struct.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
                                <i class="fa fa-angle-right"></i>
                            </a>

                        <div style="clear: both"></div>
                    </nav>
                </div>
            </div>

            <nav class="nav-wide-wrapper" aria-label="Page navigation">
                    <a rel="prev" href="../../posts/rust/rust_binary_include_dir_and_get_contents.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
                        <i class="fa fa-angle-left"></i>
                    </a>

                    <a rel="next" href="../../posts/rust/implment_builder_proc_macro_for_command_struct.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
                        <i class="fa fa-angle-right"></i>
                    </a>
            </nav>

        </div>



        <script>
            window.playground_line_numbers = true;
        </script>

        <script>
            window.playground_copyable = true;
        </script>

        <script src="../../ace.js"></script>
        <script src="../../editor.js"></script>
        <script src="../../mode-rust.js"></script>
        <script src="../../theme-dawn.js"></script>
        <script src="../../theme-tomorrow_night.js"></script>

        <script src="../../elasticlunr.min.js"></script>
        <script src="../../mark.min.js"></script>
        <script src="../../searcher.js"></script>

        <script src="../../clipboard.min.js"></script>
        <script src="../../highlight.js"></script>
        <script src="../../book.js"></script>

        <!-- Custom JS scripts -->
        <script src="../../src/js/custom.js"></script>


    </div>
    </body>
</html>
