<!DOCTYPE HTML>
<html lang="en" class="sidebar-visible no-js coal">
    <head>
        <!-- Book generated using mdBook -->
        <meta charset="UTF-8">
        <title>How to detect objects on images using the YOLOv8 neural network - Andrew&#x27;s Blog</title>


        <!-- Custom HTML head -->
        
        <meta name="description" content="Andrew Ryan&#x27;s Blog">
        <meta name="viewport" content="width=device-width, initial-scale=1">
        <meta name="theme-color" content="#ffffff">

        <link rel="icon" href="../../favicon.svg">
        <link rel="shortcut icon" href="../../favicon.png">
        <link rel="stylesheet" href="../../css/variables.css">
        <link rel="stylesheet" href="../../css/general.css">
        <link rel="stylesheet" href="../../css/chrome.css">

        <!-- Fonts -->
        <link rel="stylesheet" href="../../FontAwesome/css/font-awesome.css">
        <link rel="stylesheet" href="../../fonts/fonts.css">

        <!-- Highlight.js Stylesheets -->
        <link rel="stylesheet" href="../../highlight.css">
        <link rel="stylesheet" href="../../tomorrow-night.css">
        <link rel="stylesheet" href="../../ayu-highlight.css">

        <!-- Custom theme stylesheets -->
        <link rel="stylesheet" href="../../src/style/custom.css">

        <!-- MathJax -->
        <script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
    </head>
    <body>
    <div id="body-container">
        <!-- Provide site root to javascript -->
        <script>
            // Site root relative to this page, used by mdBook's scripts.
            var path_to_root = "../../";
            // Default theme is always "coal". The original code queried
            // prefers-color-scheme but returned "coal" for both branches,
            // so the media-query ternary was dead code.
            var default_theme = "coal";
        </script>

        <!-- Work around some values being stored in localStorage wrapped in quotes -->
        <script>
            try {
                var theme = localStorage.getItem('mdbook-theme');
                var sidebar = localStorage.getItem('mdbook-sidebar');

                // Strip wrapping quotes left behind by older mdBook versions.
                // Guard against null: getItem returns null for a missing key,
                // and calling startsWith on null would throw, aborting this
                // block before the sidebar value below was ever unwrapped.
                if (theme && theme.startsWith('"') && theme.endsWith('"')) {
                    localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
                }

                if (sidebar && sidebar.startsWith('"') && sidebar.endsWith('"')) {
                    localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
                }
            } catch (e) { }
        </script>

        <!-- Set the theme before any content is loaded, prevents flash -->
        <script>
            // Resolve the saved theme (falling back to the default) and apply
            // it to <html> before any content renders, preventing a flash of
            // the wrong theme.
            var theme;
            try { theme = localStorage.getItem('mdbook-theme'); } catch (e) { }
            if (theme === null || theme === undefined) {
                theme = default_theme;
            }
            // Replace the static no-JS fallback classes with the live ones.
            var html = document.documentElement;
            html.classList.remove('no-js');
            html.classList.remove('coal');
            html.classList.add(theme);
            html.classList.add('js');
        </script>

        <!-- Hide / unhide sidebar before it is displayed -->
        <script>
            // Decide sidebar visibility before first paint: wide viewports
            // (>= 1080px) restore the saved preference, defaulting to visible;
            // narrow viewports always start hidden.
            var html = document.documentElement;
            var sidebar = 'hidden';
            if (document.body.clientWidth >= 1080) {
                sidebar = null;
                try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch (e) { }
                sidebar = sidebar || 'visible';
            }
            html.classList.remove('sidebar-visible');
            html.classList.add('sidebar-' + sidebar);
        </script>

        <nav id="sidebar" class="sidebar" aria-label="Table of contents">
            <div class="sidebar-scrollbox">
                <ol class="chapter"><li class="chapter-item affix "><a href="../../index.html">Andrew's Blog</a></li><li class="chapter-item "><a href="../../posts/linux/linux.html"><strong aria-hidden="true">1.</strong> Linux</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/linux/install_linux.html"><strong aria-hidden="true">1.1.</strong> install linux</a></li><li class="chapter-item "><a href="../../posts/linux/bash_profile.html"><strong aria-hidden="true">1.2.</strong> bash profile</a></li><li class="chapter-item "><a href="../../posts/linux/command_list.html"><strong aria-hidden="true">1.3.</strong> command list</a></li><li class="chapter-item "><a href="../../posts/linux/git_guide.html"><strong aria-hidden="true">1.4.</strong> git guide</a></li><li class="chapter-item "><a href="../../posts/linux/tar.html"><strong aria-hidden="true">1.5.</strong> tar</a></li><li class="chapter-item "><a href="../../posts/Linux/git_cheatsheet.html"><strong aria-hidden="true">1.6.</strong> Git Cheatsheet</a></li><li class="chapter-item "><a href="../../posts/Linux/bash_cheatsheet.html"><strong aria-hidden="true">1.7.</strong> Bash Cheatsheet</a></li></ol></li><li class="chapter-item "><a href="../../posts/macos/mac.html"><strong aria-hidden="true">2.</strong> MacOS</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/macos/macos_profiles.html"><strong aria-hidden="true">2.1.</strong> macos profiles</a></li><li class="chapter-item "><a href="../../posts/macos/macos_pwn_env_setup.html"><strong aria-hidden="true">2.2.</strong> macos pwn env setup</a></li></ol></li><li class="chapter-item "><a href="../../posts/swift/swift.html"><strong aria-hidden="true">3.</strong> Swift</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/swift/learn_swift.html"><strong aria-hidden="true">3.1.</strong> learn swift 
basics</a></li><li class="chapter-item "><a href="../../posts/swift/swift_extensions.html"><strong aria-hidden="true">3.2.</strong> Swift extensions</a></li><li class="chapter-item "><a href="../../posts/swift/swiftui_extension.html"><strong aria-hidden="true">3.3.</strong> SwiftUI extensions</a></li><li class="chapter-item "><a href="../../posts/swift/install_swift.html"><strong aria-hidden="true">3.4.</strong> install swift</a></li><li class="chapter-item "><a href="../../posts/swift/task_planner.html"><strong aria-hidden="true">3.5.</strong> implment task panner app with SwiftUI</a></li><li class="chapter-item "><a href="../../posts/swift/swift_cheat_sheet.html"><strong aria-hidden="true">3.6.</strong> Swift Cheat Sheet</a></li><li class="chapter-item "><a href="../../posts/swift/yinci_url.html"><strong aria-hidden="true">3.7.</strong> Personal privacy protocol</a></li><li class="chapter-item "><a href="../../posts/swift/swift_regular_exressions.html"><strong aria-hidden="true">3.8.</strong> Swift regular exressions</a></li><li class="chapter-item "><a href="../../posts/ios/how_to_create_beautiful_ios_charts_in_swift.html"><strong aria-hidden="true">3.9.</strong> How to Create Beautiful iOS Charts in Swift</a></li><li class="chapter-item "><a href="../../posts/swift/swiftui_source_code.html"><strong aria-hidden="true">3.10.</strong> SwiftUI source code</a></li><li class="chapter-item "><a href="../../posts/swift/use_swift_fetch_iciba_api.html"><strong aria-hidden="true">3.11.</strong> use swift fetch iciba API</a></li></ol></li><li class="chapter-item "><a href="../../posts/ios/ios.html"><strong aria-hidden="true">4.</strong> iOS</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/ios/cocaposd_setup_and_install_for_ios_project.html"><strong aria-hidden="true">4.1.</strong> cocaposd setup and install for ios project</a></li><li class="chapter-item "><a 
href="../../posts/ios/swiftui_show_gif_image.html"><strong aria-hidden="true">4.2.</strong> SwiftUI show gif image</a></li><li class="chapter-item "><a href="../../posts/ios/implement_task_planner_app.html"><strong aria-hidden="true">4.3.</strong> implement Task planner App</a></li></ol></li><li class="chapter-item "><a href="../../posts/objective_c/objective_c.html"><strong aria-hidden="true">5.</strong> Objective-C</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/objective_c/objective_c_cheat_sheet.html"><strong aria-hidden="true">5.1.</strong> Objective-C Cheat Sheet</a></li><li class="chapter-item "><a href="../../posts/objective_c/objective_c_for_absolute_beginners_read_note.html"><strong aria-hidden="true">5.2.</strong> Objective-C Note</a></li></ol></li><li class="chapter-item "><a href="../../posts/dart/dart.html"><strong aria-hidden="true">6.</strong> Dart</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/dart/flutter.html"><strong aria-hidden="true">6.1.</strong> Flutter Cheat Sheet</a></li><li class="chapter-item "><a href="../../posts/dart/dart_cheat_sheet.html"><strong aria-hidden="true">6.2.</strong> Dart Cheat Sheet</a></li><li class="chapter-item "><a href="../../posts/flutter/flutter_dev_test.html"><strong aria-hidden="true">6.3.</strong> Flutter dev test</a></li></ol></li><li class="chapter-item "><a href="../../posts/rust/rust.html"><strong aria-hidden="true">7.</strong> Rust</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/rust/offline_use_rust.html"><strong aria-hidden="true">7.1.</strong> Offline use rust</a></li><li class="chapter-item "><a href="../../posts/rust/rust_grammer.html"><strong aria-hidden="true">7.2.</strong> rust grammar</a></li><li class="chapter-item "><a href="../../posts/rust/pase_string_and_decimal_conversion.html"><strong 
aria-hidden="true">7.3.</strong> pase string and decimal conversion</a></li><li class="chapter-item "><a href="../../posts/rust/parse_types.html"><strong aria-hidden="true">7.4.</strong> rust types</a></li><li class="chapter-item "><a href="../../posts/rust/rust_life_cycle.html"><strong aria-hidden="true">7.5.</strong> Rust life cycle</a></li><li class="chapter-item "><a href="../../posts/rust/rust_generic.html"><strong aria-hidden="true">7.6.</strong> rust generics</a></li><li class="chapter-item "><a href="../../posts/rust/rust_implment_matrix.html"><strong aria-hidden="true">7.7.</strong> Rust implement matrix</a></li><li class="chapter-item "><a href="../../posts/rust/rust_sort.html"><strong aria-hidden="true">7.8.</strong> Rust implement sort algorithms</a></li><li class="chapter-item "><a href="../../posts/rust/implement_aes_encryption.html"><strong aria-hidden="true">7.9.</strong> Rust implement AEC encryption and decryption</a></li><li class="chapter-item "><a href="../../posts/rust/implement_trie_data_structure.html"><strong aria-hidden="true">7.10.</strong> implement trie data structure</a></li><li class="chapter-item "><a href="../../posts/rust/rust_implement_tree.html"><strong aria-hidden="true">7.11.</strong> implement tree data_structure</a></li><li class="chapter-item "><a href="../../posts/rust/list_dir.html"><strong aria-hidden="true">7.12.</strong> list dir</a></li><li class="chapter-item "><a href="../../posts/rust/fast_way_to_implment_object_trait.html"><strong aria-hidden="true">7.13.</strong> fast way to implment object trait</a></li><li class="chapter-item "><a href="../../posts/rust/compress_rust_binary_size.html"><strong aria-hidden="true">7.14.</strong> compress rust binary size</a></li><li class="chapter-item "><a href="../../posts/rust/implment_file_upload_backend.html"><strong aria-hidden="true">7.15.</strong> impliment file upload</a></li><li class="chapter-item "><a 
href="../../posts/rust/this_is_add_post_cli_implementation_in_rust.html"><strong aria-hidden="true">7.16.</strong> this is add_post cli implementation in rust</a></li><li class="chapter-item "><a href="../../posts/rust/use_rust_implment_a_copyclipbord_cli.html"><strong aria-hidden="true">7.17.</strong> Use rust implment a copyclipbord CLI</a></li><li class="chapter-item "><a href="../../posts/rust/sqlite_database_add_delete_update_show_in_rust.html"><strong aria-hidden="true">7.18.</strong> sqlite database add delete update show in rust</a></li><li class="chapter-item "><a href="../../posts/rust/implementing_tokio_joinhandle_for_wasm.html"><strong aria-hidden="true">7.19.</strong> Implementing tokio JoinHandle for wasm</a></li><li class="chapter-item "><a href="../../posts/rust/rust_implement_a_crate_for_encode_and_decode_brainfuck_and_ook.html"><strong aria-hidden="true">7.20.</strong> rust implement a crate for encode and decode brainfuck and ook</a></li><li class="chapter-item "><a href="../../posts/rust/slint_builtin_elements.html"><strong aria-hidden="true">7.21.</strong> Slint Builtin Elements</a></li><li class="chapter-item "><a href="../../posts/rust/corporate_network_install_rust_on_windows.html"><strong aria-hidden="true">7.22.</strong> Corporate network install Rust on windows</a></li><li class="chapter-item "><a href="../../posts/rust/rust_binary_file_how_to_judge_static_link_or_dynamic_link_in_macos.html"><strong aria-hidden="true">7.23.</strong> rust binary file how to judge static link or dynamic link in Macos</a></li><li class="chapter-item "><a href="../../posts/rust/rust_binary_include_dir_and_get_contents.html"><strong aria-hidden="true">7.24.</strong> rust binary include dir and get contents</a></li><li class="chapter-item "><a href="../../posts/rust/how_to_create_yolov8_based_object_detection_web_service_using_python,_julia,_node.js,_javascript,_go_and_rust.html"><strong aria-hidden="true">7.25.</strong> How to create YOLOv8-based object 
detection web service using Python, Julia, Node.js, JavaScript, Go and Rust</a></li><li class="chapter-item "><a href="../../posts/rust/implment_builder_proc_macro_for_command_struct.html"><strong aria-hidden="true">7.26.</strong> implment Builder proc-macro for Command struct</a></li></ol></li><li class="chapter-item "><a href="../../posts/java/java.html"><strong aria-hidden="true">8.</strong> Java</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/java/java_grammar.html"><strong aria-hidden="true">8.1.</strong> java grammar and codewar</a></li><li class="chapter-item "><a href="../../posts/java/run_jar.html"><strong aria-hidden="true">8.2.</strong> java run .jar</a></li><li class="chapter-item "><a href="../../posts/java/java_pomxml_add_defaultgoal_to_build.html"><strong aria-hidden="true">8.3.</strong> Java pomxml add defaultGoal to build</a></li><li class="chapter-item "><a href="../../posts/java/java_set_mvn_mirror.html"><strong aria-hidden="true">8.4.</strong> Java set mvn mirror</a></li></ol></li><li class="chapter-item expanded "><a href="../../posts/python/python.html"><strong aria-hidden="true">9.</strong> Python</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/python/convert_pesn.html"><strong aria-hidden="true">9.1.</strong> convert pesn</a></li><li class="chapter-item "><a href="../../posts/python/find_remove_dir.html"><strong aria-hidden="true">9.2.</strong> find and remove dir</a></li><li class="chapter-item "><a href="../../posts/python/timing_message.html"><strong aria-hidden="true">9.3.</strong> wechat send message</a></li><li class="chapter-item "><a href="../../posts/python/use_python_openpyxl_package_read_and_edit_excel_files.html"><strong aria-hidden="true">9.4.</strong> Use python openpyxl package read and edit excel files</a></li><li class="chapter-item "><a href="../../posts/python/sanctum_model_yaml.html"><strong 
aria-hidden="true">9.5.</strong> sanctum model yaml</a></li><li class="chapter-item expanded "><a href="../../posts/python/how_to_detect_objects_on_images_using_the_yolov8_neural_network.html" class="active"><strong aria-hidden="true">9.6.</strong> How to detect objects on images using the YOLOv8 neural network</a></li><li class="chapter-item "><a href="../../posts/python/use_huggingface_model.html"><strong aria-hidden="true">9.7.</strong> use huggingface model</a></li></ol></li><li class="chapter-item "><a href="../../posts/go/go.html"><strong aria-hidden="true">10.</strong> Go</a></li><li class="chapter-item "><a href="../../posts/javascript/js.html"><strong aria-hidden="true">11.</strong> Javascript</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/javascript/js_tutorial.html"><strong aria-hidden="true">11.1.</strong> js tutorial</a></li><li class="chapter-item "><a href="../../posts/javascript/js_tutorial_map.html"><strong aria-hidden="true">11.2.</strong> ja map</a></li><li class="chapter-item "><a href="../../posts/javascript/js_tutorial_math.html"><strong aria-hidden="true">11.3.</strong> js math</a></li><li class="chapter-item "><a href="../../posts/javascript/js_tutorial_object.html"><strong aria-hidden="true">11.4.</strong> js object</a></li><li class="chapter-item "><a href="../../posts/javascript/js_tutorial_set.html"><strong aria-hidden="true">11.5.</strong> js set</a></li><li class="chapter-item "><a href="../../posts/javascript/single_thread_and_asynchronous.html"><strong aria-hidden="true">11.6.</strong> single thread and asynchronous</a></li><li class="chapter-item "><a href="../../posts/javascript/this.html"><strong aria-hidden="true">11.7.</strong> js this</a></li><li class="chapter-item "><a href="../../posts/javascript/js_implment_aes.html"><strong aria-hidden="true">11.8.</strong> js implment aes</a></li><li class="chapter-item "><a 
href="../../posts/javascript/getting_started_with_ajax.html"><strong aria-hidden="true">11.9.</strong> getting started with ajax</a></li><li class="chapter-item "><a href="../../posts/javascript/BinarySearchTree.html"><strong aria-hidden="true">11.10.</strong> binary search tree</a></li><li class="chapter-item "><a href="../../posts/javascript/goole_zx.html"><strong aria-hidden="true">11.11.</strong> goole zx</a></li><li class="chapter-item "><a href="../../posts/javascript/es6.html"><strong aria-hidden="true">11.12.</strong> es6</a></li></ol></li><li class="chapter-item "><a href="../../posts/ruby/ruby.html"><strong aria-hidden="true">12.</strong> Ruby</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/ruby/rails_setup_env.html"><strong aria-hidden="true">12.1.</strong> ruby on rails setup environment</a></li><li class="chapter-item "><a href="../../posts/ruby/learn_ruby.html"><strong aria-hidden="true">12.2.</strong> learn ruby</a></li><li class="chapter-item "><a href="../../posts/ruby/ruby_note.html"><strong aria-hidden="true">12.3.</strong> Ruby Note</a></li><li class="chapter-item "><a href="../../posts/ruby/setup_ruby_for_ctf.html"><strong aria-hidden="true">12.4.</strong> Setup ruby for CTF</a></li></ol></li><li class="chapter-item "><a href="../../posts/react/react.html"><strong aria-hidden="true">13.</strong> React</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/react/react_life_cycle.html"><strong aria-hidden="true">13.1.</strong> react life cycle</a></li><li class="chapter-item "><a href="../../posts/react/react_router.html"><strong aria-hidden="true">13.2.</strong> react router</a></li><li class="chapter-item "><a href="../../posts/react/react_this.html"><strong aria-hidden="true">13.3.</strong> react this</a></li><li class="chapter-item "><a href="../../posts/react/react_interviw.html"><strong aria-hidden="true">13.4.</strong> 
react interview</a></li><li class="chapter-item "><a href="../../posts/react/important_react_interview.html"><strong aria-hidden="true">13.5.</strong> important react interview</a></li><li class="chapter-item "><a href="../../posts/react/react_quick_reference.html"><strong aria-hidden="true">13.6.</strong> react quick reference</a></li><li class="chapter-item "><a href="../../posts/react/redux_quick_reference.html"><strong aria-hidden="true">13.7.</strong> redux quick reference</a></li></ol></li><li class="chapter-item "><a href="../../posts/vue/vue.html"><strong aria-hidden="true">14.</strong> Vue</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/vue/vue_ajax.html"><strong aria-hidden="true">14.1.</strong> vue ajax</a></li></ol></li><li class="chapter-item "><a href="../../posts/angular/angular.html"><strong aria-hidden="true">15.</strong> Angular</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/angular/controller_communication.html"><strong aria-hidden="true">15.1.</strong> controller communication</a></li><li class="chapter-item "><a href="../../posts/angular/creating_custom_directives.html"><strong aria-hidden="true">15.2.</strong> creating custom directives</a></li><li class="chapter-item "><a href="../../posts/angular/directive_notes.html"><strong aria-hidden="true">15.3.</strong> directive notes</a></li><li class="chapter-item "><a href="../../posts/angular/directive_communication.html"><strong aria-hidden="true">15.4.</strong> directive communication</a></li><li class="chapter-item "><a href="../../posts/angular/post_params.html"><strong aria-hidden="true">15.5.</strong> post params</a></li><li class="chapter-item "><a href="../../posts/angular/read_json_angular.html"><strong aria-hidden="true">15.6.</strong> read json angular</a></li><li class="chapter-item "><a href="../../posts/angular/same_route_reload.html"><strong 
aria-hidden="true">15.7.</strong> same route reload</a></li></ol></li><li class="chapter-item "><a href="../../posts/css/css.html"><strong aria-hidden="true">16.</strong> Css</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/css/use_css_media.html"><strong aria-hidden="true">16.1.</strong> use css media</a></li></ol></li><li class="chapter-item "><a href="../../posts/php/php.html"><strong aria-hidden="true">17.</strong> Php</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/php/for_php_string_implment_some_extemtion_functions.html"><strong aria-hidden="true">17.1.</strong> for php string implment some extemtion functions</a></li><li class="chapter-item "><a href="../../posts/php/php_cheatsheet.html"><strong aria-hidden="true">17.2.</strong> PHP cheatsheet</a></li></ol></li><li class="chapter-item "><a href="../../posts/windows/windows.html"><strong aria-hidden="true">18.</strong> Windows</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/windows/windows.html"><strong aria-hidden="true">18.1.</strong> Windows</a></li><li class="chapter-item "><a href="../../posts/windows/windows10_use_powershell_dedup_redundent_path.html"><strong aria-hidden="true">18.2.</strong> Windows10 use PowerShell dedup redundent PATH</a></li></ol></li><li class="chapter-item "><a href="../../posts/leetcode/leetcode.html"><strong aria-hidden="true">19.</strong> Leetcode</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/leetcode/rust_leetcode.html"><strong aria-hidden="true">19.1.</strong> rust leetcode</a></li><li class="chapter-item "><a href="../../posts/leetcode/rust_codewar.html"><strong aria-hidden="true">19.2.</strong> rust codewar</a></li><li class="chapter-item "><a href="../../posts/leetcode/swift_codewar.html"><strong 
aria-hidden="true">19.3.</strong> swift codewar</a></li><li class="chapter-item "><a href="../../posts/leetcode/js_leetcode.html"><strong aria-hidden="true">19.4.</strong> js leetcode</a></li><li class="chapter-item "><a href="../../posts/leetcode/java_leetcode.html"><strong aria-hidden="true">19.5.</strong> java leetcode</a></li><li class="chapter-item "><a href="../../posts/leetcode/rust_huawei.html"><strong aria-hidden="true">19.6.</strong> huawei test</a></li><li class="chapter-item "><a href="../../posts/leetcode/rust_utils.html"><strong aria-hidden="true">19.7.</strong> rust common functions</a></li><li class="chapter-item "><a href="../../posts/leetcode/olympiad_training.html"><strong aria-hidden="true">19.8.</strong> Computer olympiad training</a></li></ol></li><li class="chapter-item "><a href="../../posts/ctf/CTF.html"><strong aria-hidden="true">20.</strong> CTF</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/ctf/CTF_Note.html"><strong aria-hidden="true">20.1.</strong> CTF Note</a></li><li class="chapter-item "><a href="../../posts/ctf/0.1_Web.html"><strong aria-hidden="true">20.2.</strong> Web</a></li><li class="chapter-item "><a href="../../posts/ctf/4.1_Misc.html"><strong aria-hidden="true">20.3.</strong> Misc</a></li><li class="chapter-item "><a href="../../posts/ctf/3.2_PWN_note.html"><strong aria-hidden="true">20.4.</strong> PWN</a></li><li class="chapter-item "><a href="../../posts/ctf/3.1_Crypto.html"><strong aria-hidden="true">20.5.</strong> Crypto</a></li><li class="chapter-item "><a href="../../posts/ctf/3.4_RSA_note.html"><strong aria-hidden="true">20.6.</strong> Rsa attack</a></li><li class="chapter-item "><a href="../../posts/ctf/3.5_Base64.html"><strong aria-hidden="true">20.7.</strong> Base64</a></li><li class="chapter-item "><a href="../../posts/ctf/0.0_SQL Injection Cheatsheet.html"><strong aria-hidden="true">20.8.</strong> SQL Injection Cheatsheet</a></li><li 
class="chapter-item "><a href="../../posts/ctf/1.1_SQL_injection.html"><strong aria-hidden="true">20.9.</strong> SQL Injection</a></li><li class="chapter-item "><a href="../../posts/ctf/1.2_SQL_injection_UNION_attacks.html"><strong aria-hidden="true">20.10.</strong> SQL Injection UNION attacks</a></li><li class="chapter-item "><a href="../../posts/ctf/1.3_Blind SQL injection.html"><strong aria-hidden="true">20.11.</strong> Blind SQL Injection</a></li><li class="chapter-item "><a href="../../posts/ctf/1.4_Code Injection.html"><strong aria-hidden="true">20.12.</strong> Code Injection</a></li><li class="chapter-item "><a href="../../posts/ctf/1.5_SSRF.html"><strong aria-hidden="true">20.13.</strong> SSRF</a></li><li class="chapter-item "><a href="../../posts/ctf/1.6_OS command injection.html"><strong aria-hidden="true">20.14.</strong> OS command injection</a></li><li class="chapter-item "><a href="../../posts/ctf/1.7_Local file inclusion.html"><strong aria-hidden="true">20.15.</strong> Local file inclusion</a></li><li class="chapter-item "><a href="../../posts/ctf/1.8_Remote file inclusion.html"><strong aria-hidden="true">20.16.</strong> Remote file inclusion</a></li><li class="chapter-item "><a href="../../posts/ctf/1.9_CSRFm.html"><strong aria-hidden="true">20.17.</strong> CSRF</a></li><li class="chapter-item "><a href="../../posts/ctf/1.10_NoSQL injection.html"><strong aria-hidden="true">20.18.</strong> NoSQL injection</a></li><li class="chapter-item "><a href="../../posts/ctf/1.11_JSON injection.html"><strong aria-hidden="true">20.19.</strong> JSON injection</a></li><li class="chapter-item "><a href="../../posts/ctf/1.12_CTF_Web_SQL_Note.html"><strong aria-hidden="true">20.20.</strong> CTF Web SQL Note</a></li><li class="chapter-item "><a href="../../posts/ctf/2.1_XXE.html"><strong aria-hidden="true">20.21.</strong> XXE</a></li><li class="chapter-item "><a href="../../posts/ctf/2.2_XSS.html"><strong aria-hidden="true">20.22.</strong> XSS</a></li><li 
class="chapter-item "><a href="../../posts/ctf/2.3_Upload File.html"><strong aria-hidden="true">20.23.</strong> Upload File</a></li><li class="chapter-item "><a href="../../posts/ctf/2.4_serialize_unserialize.html"><strong aria-hidden="true">20.24.</strong> serialize unserialize</a></li><li class="chapter-item "><a href="../../posts/ctf/2.5_Race condition.html"><strong aria-hidden="true">20.25.</strong> Race condition</a></li><li class="chapter-item "><a href="../../posts/ctf/zip_plain_text_attack.html"><strong aria-hidden="true">20.26.</strong> Zip plain text attack</a></li><li class="chapter-item "><a href="../../posts/ctf/3.3_pwn HCTF2016 brop.html"><strong aria-hidden="true">20.27.</strong> pwn HCTF2016 brop</a></li><li class="chapter-item "><a href="../../posts/ctf/pwn_patch_defense_skill.html"><strong aria-hidden="true">20.28.</strong> PWN Patch defense skill</a></li><li class="chapter-item "><a href="../../posts/ctf/pwn_stack_overflow.html"><strong aria-hidden="true">20.29.</strong> PWN stack overflow</a></li><li class="chapter-item "><a href="../../posts/ctf/pwn_heap_overflow.html"><strong aria-hidden="true">20.30.</strong> PWN heap overflow</a></li><li class="chapter-item "><a href="../../posts/ctf/pwn_format_string_vulnerability.html"><strong aria-hidden="true">20.31.</strong> PWN Format String Vulnerability</a></li><li class="chapter-item "><a href="../../posts/ctf/kali_linux_tutorials.html"><strong aria-hidden="true">20.32.</strong> Kali linux tutorials</a></li><li class="chapter-item "><a href="../../posts/ctf/google_dorks_2023_lists.html"><strong aria-hidden="true">20.33.</strong> Google Dorks 2023 Lists</a></li><li class="chapter-item "><a href="../../posts/ctf/dvwa_writeup.html"><strong aria-hidden="true">20.34.</strong> DVWA WriteUp</a></li><li class="chapter-item "><a href="../../posts/ctf/bwapp_writeup.html"><strong aria-hidden="true">20.35.</strong> bWAPP WriteUp</a></li><li class="chapter-item "><a 
href="../../posts/ctf/sqlilabs_writeup.html"><strong aria-hidden="true">20.36.</strong> sqlilabs WriteUp</a></li><li class="chapter-item "><a href="../../posts/ctf/pwnable_kr_challenge.html"><strong aria-hidden="true">20.37.</strong> Solutions for pwnable.kr</a></li><li class="chapter-item "><a href="../../posts/ctf/the_periodic_table.html"><strong aria-hidden="true">20.38.</strong> The Periodic Table</a></li><li class="chapter-item "><a href="../../posts/ctf/pwntools_cheatsheet.html"><strong aria-hidden="true">20.39.</strong> Pwntools Cheatsheet</a></li><li class="chapter-item "><a href="../../posts/ctf/gdb_cheatsheet.html"><strong aria-hidden="true">20.40.</strong> GDB Cheatsheet</a></li></ol></li><li class="chapter-item "><a href="../../posts/iltes/iltes.html"><strong aria-hidden="true">21.</strong> ILTES</a><a class="toggle"><div>❱</div></a></li><li><ol class="section"><li class="chapter-item "><a href="../../posts/iltes/iltes_writing.html"><strong aria-hidden="true">21.1.</strong> ILTES Writing</a></li></ol></li></ol>
            </div>
            <div id="sidebar-resize-handle" class="sidebar-resize-handle"></div>
        </nav>

        <!-- Track and set sidebar scroll position -->
        <script>
            // Persist the sidebar's scroll offset when a TOC link is clicked,
            // and restore it on the next page load so the sidebar does not
            // jump while navigating between chapters.
            var sidebarScrollbox = document.querySelector('#sidebar .sidebar-scrollbox');
            sidebarScrollbox.addEventListener('click', function (event) {
                if (event.target.tagName === 'A') {
                    sessionStorage.setItem('sidebar-scroll', sidebarScrollbox.scrollTop);
                }
            }, { passive: true });

            var savedScroll = sessionStorage.getItem('sidebar-scroll');
            sessionStorage.removeItem('sidebar-scroll');
            if (savedScroll) {
                // Arrived via a sidebar link: keep the previous scroll position.
                sidebarScrollbox.scrollTop = savedScroll;
            } else {
                // Arrived via the next/previous chapter buttons: center the
                // currently active section instead.
                var activeSection = document.querySelector('#sidebar .active');
                if (activeSection) {
                    activeSection.scrollIntoView({ block: 'center' });
                }
            }
        </script>

        <div id="page-wrapper" class="page-wrapper">

            <div class="page">
                                <div id="menu-bar-hover-placeholder"></div>
                <div id="menu-bar" class="menu-bar sticky">
                    <div class="left-buttons">
                        <button id="sidebar-toggle" class="icon-button" type="button" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
                            <i class="fa fa-bars"></i>
                        </button>
                        <button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
                            <i class="fa fa-paint-brush"></i>
                        </button>
                        <ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
                            <li role="none"><button role="menuitem" class="theme" id="light">Light</button></li>
                            <li role="none"><button role="menuitem" class="theme" id="rust">Rust</button></li>
                            <li role="none"><button role="menuitem" class="theme" id="coal">Coal</button></li>
                            <li role="none"><button role="menuitem" class="theme" id="navy">Navy</button></li>
                            <li role="none"><button role="menuitem" class="theme" id="ayu">Ayu</button></li>
                        </ul>
                        <button id="search-toggle" class="icon-button" type="button" title="Search. (Shortkey: s)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="S" aria-controls="searchbar">
                            <i class="fa fa-search"></i>
                        </button>
                    </div>

                    <h1 class="menu-title">Andrew&#x27;s Blog</h1>

                    <div class="right-buttons">
                        <a href="https://gitee.com/dnrops/dnrops" title="Git repository" aria-label="Git repository">
                            <i id="git-repository-button" class="fa fa-github"></i>
                        </a>

                    </div>
                </div>

                <div id="search-wrapper" class="hidden">
                    <form id="searchbar-outer" class="searchbar-outer">
                        <input type="search" id="searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
                    </form>
                    <div id="searchresults-outer" class="searchresults-outer hidden">
                        <div id="searchresults-header" class="searchresults-header"></div>
                        <ul id="searchresults">
                        </ul>
                    </div>
                </div>

                <!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
                <script>
                    document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
                    document.getElementById('sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
                    Array.from(document.querySelectorAll('#sidebar a')).forEach(function(link) {
                        link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
                    });
                </script>

                <div id="content" class="content">
                    <main>
                        <h1 id="how-to-detect-objects-on-images-using-the-yolov8-neural-network"><a class="header" href="#how-to-detect-objects-on-images-using-the-yolov8-neural-network">How to detect objects on images using the YOLOv8 neural network</a></h1>
<p style="display:flex;
    align-items: center;
    justify-content: end;
">Pub Date: 2023-12-08</p>
<h1 id="introduction"><a class="header" href="#introduction">Introduction</a></h1>
<p>Object detection is a computer vision task that involves identifying and locating objects in images or videos. It is an important part of many applications, such as self-driving cars, robotics, and video surveillance.</p>
<p>Over the years, many methods and algorithms have been developed to find objects in images and their positions. The best quality in performing these tasks comes from using convolutional neural networks.</p>
<p>One of the most popular neural networks for this task is YOLO, created in 2015 by Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali Farhadi in their famous research paper “You Only Look Once: Unified, Real-Time Object Detection”.</p>
<p>Since that time, there have been quite a few versions of YOLO. Recent releases can do even more than object detection. The newest release is <a href="https://ultralytics.com/yolov8">YOLOv8</a>, which we are going to use in this tutorial.</p>
<p>Here, I will show you the main features of this network for object detection. First, we will use a pre-trained model to detect common object classes like cats and dogs. Then, I will show how to train your own model to detect specific object types that you select, and how to prepare the data for this process. Finally, we will create a web application to detect objects on images right in a web browser using the custom trained model.</p>
<p>To follow this tutorial, you should be familiar with <a href="https://python.org/">Python</a> and have a basic understanding of machine learning, neural networks, and their application in object detection. You can watch <a href="https://www.youtube.com/playlist?list=PL_IHmaMAvkVxdDOBRg2CbcJBq9SY7ZUvs">this short video course</a> to familiarize yourself with all required machine learning theory.</p>
<p>Once you’ve refreshed the theory, let’s get started with the practice!</p>
<p><a href=""></a></p>
<h1 id="problems-yolov8-can-solve"><a class="header" href="#problems-yolov8-can-solve">Problems YOLOv8 Can Solve</a></h1>
<p>You can use the YOLOv8 network to solve classification, object detection, and image segmentation problems. All these methods detect objects in images or in videos in different ways, as you can see in the image below:</p>
<div class="table-wrapper"><table><thead><tr><th></th><th></th><th></th></tr></thead><tbody>
<tr><td><strong>Classification</strong></td><td><strong>Detection</strong></td><td><strong>Segmentation</strong></td></tr>
<tr><td><img src="https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/811f3c49fbcd44a69acdd1bb41bcd38d~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.image#?w=224&amp;h=284&amp;s=10469&amp;e=jpg&amp;b=739150" alt="" /></td><td><img src="https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/78bc6cd26ae74f5cb71fc13cb464f97b~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.image#?w=224&amp;h=284&amp;s=11716&amp;e=jpg&amp;b=789552" alt="" /></td><td><img src="https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/581d2a8547db489abebd8d1253ec95de~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.image#?w=224&amp;h=284&amp;s=10436&amp;e=jpg&amp;b=946ed7" alt="" /></td></tr>
</tbody></table>
</div>
<p>The neural network that is created and trained for <strong>image classification</strong> determines the class of an object on the image and returns its name and the probability of this prediction. For example, for the left image, it returned that this is a “cat” and that the confidence level of this prediction is 92% (0.92).</p>
<p>The neural network for <strong>object detection</strong>, in addition to the object type and probability, returns the coordinates of the object on the image: x, y, width and height, as shown on the second image. Furthermore, object detection neural networks can detect several objects on the image and their bounding boxes.</p>
<p>Finally, in addition to object types and bounding boxes, the neural network trained for <strong>image segmentation</strong> detects the shapes of the objects, as shown on the right image.</p>
<p>There are many different neural network architectures developed for these tasks, and for each of them you had to use a separate network in the past. Fortunately, things changed after <a href="https://docs.ultralytics.com/">YOLO</a> was created. Now you can use a single platform for all these problems.</p>
<p>In this article, we will discover the <strong>object detection</strong> using YOLOv8. I will guide you how to create a web application, that will use it to detect traffic lights and road signs on the images. In the next articles I will cover other features, including image segmentation.</p>
<p>In the next sections we will go through all the steps required to create an object detector. By the end, you will have a complete AI-powered web application.</p>
<p><a href=""></a></p>
<h1 id="getting-started-with-yolov8"><a class="header" href="#getting-started-with-yolov8">Getting started with YOLOv8</a></h1>
<p>Technically speaking, <a href="https://ultralytics.com/">YOLOv8</a> is a group of convolutional neural network models created and trained using the <a href="https://pytorch.org">PyTorch</a> framework.</p>
<p>In addition, the YOLOv8 package provides a single Python API to work with all of them using the same methods. That is why, to use it, you need an environment to run Python code. I highly recommend using the <a href="https://jupyter.org">Jupyter Notebook</a>.</p>
<p>After ensuring that you have Python and Jupyter installed on your computer, run the notebook and install the YOLOv8 package in it by running the following command:</p>
<pre><code>!pip install ultralytics
</code></pre>
<p>The <code>ultralytics</code> package has the <code>YOLO</code> class, which is used to create neural network models.</p>
<p>To get access to it, import it to your Python code:</p>
<pre><code>from ultralytics import YOLO
</code></pre>
<p>Now everything is ready to create the neural network model:</p>
<pre><code>model = YOLO(&quot;yolov8m.pt&quot;)
</code></pre>
<p>As I wrote before, YOLOv8 is a group of neural network models. These models were created and trained using PyTorch and exported to files with the <code>.pt</code> extension. There are three types of models, and five models of different sizes for each type:</p>
<div class="table-wrapper"><table><thead><tr><th></th><th></th><th></th><th></th></tr></thead><tbody>
<tr><td><strong>Classification</strong></td><td><strong>Detection</strong></td><td><strong>Segmentation</strong></td><td><strong>Kind</strong></td></tr>
<tr><td>yolov8n-cls.pt</td><td>yolov8n.pt</td><td>yolov8n-seg.pt</td><td>Nano</td></tr>
<tr><td>yolov8s-cls.pt</td><td>yolov8s.pt</td><td>yolov8s-seg.pt</td><td>Small</td></tr>
<tr><td>yolov8m-cls.pt</td><td>yolov8m.pt</td><td>yolov8m-seg.pt</td><td>Medium</td></tr>
<tr><td>yolov8l-cls.pt</td><td>yolov8l.pt</td><td>yolov8l-seg.pt</td><td>Large</td></tr>
<tr><td>yolov8x-cls.pt</td><td>yolov8x.pt</td><td>yolov8x-seg.pt</td><td>Huge</td></tr>
</tbody></table>
</div>
<p>The bigger model you choose, the better prediction quality you could achieve, but the slower it will work. In this tutorial I will cover object detection, that is why on the previous code snippet, I selected the “yolov8m.pt”, which is a middle-sized model for object detection.</p>
<p>When you run this code for the first time, it will download the <code>yolov8m.pt</code> file from the Ultralytics server to the current folder and then, will construct the <code>model</code> object. Now you can train this <code>model</code>, detect objects and export to use in production. For all these tasks, it has convenient methods:</p>
<ul>
<li><a href="https://docs.ultralytics.com/modes/train/">train({path to dataset descriptor file})</a> - used to train the model on images dataset.</li>
<li><a href="https://docs.ultralytics.com/modes/predict">predict({image})</a> - used to make a prediction for specified image, e.g. to detect bounding boxes of all objects, that the model could find on this image.</li>
<li><a href="https://docs.ultralytics.com/modes/export/">export({format})</a> - used to export this model from default PyTorch format to specified one.</li>
</ul>
<p>All YOLOv8 models for object detection shipped already pretrained on the <a href="https://cocodataset.org/">COCO dataset</a>, which is a huge collection of images of 80 types. So, if you do not have specific needs, then you can just run it as is, without additional training. For example, you can download this image as “cat_dog.jpg”:</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--Y1ofWB3n--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/wr8bm7gga15xp9gfz7yz.jpg"><img src="https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4f00654088944cad854b5d4323cff712~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.image#?w=612&amp;h=415&amp;s=34608&amp;e=jpg&amp;b=637c1e" alt="Image description" /></a></p>
<p>and run <code>predict</code> to detect all objects on it:</p>
<pre><code>results = model.predict(&quot;cat_dog.jpg&quot;)
</code></pre>
<p>The <code>predict</code> method accepts many different input types, including a path to a single image, an array of paths to images, the Image object of the well-known <a href="https://pillow.readthedocs.io/en/stable/">PIL</a> Python library and <a href="https://docs.ultralytics.com/modes/predict/#sources">others</a>.</p>
<p>After running the input through the model, it returns an array of results for each input image. As we provided only a single image, it returns an array with a single item, which you can extract this way:</p>
<pre><code>result = results[0]
</code></pre>
<p>The <a href="https://docs.ultralytics.com/modes/predict/#working-with-results">result</a> contains detected objects and convenient properties to work with them. The most important one is the <code>boxes</code> array with information about detected bounding boxes on the image. You can determine how many objects were detected by running the <code>len</code> function:</p>
<pre><code>len(result.boxes)
</code></pre>
<p>When I ran this, I got “2”, which means that there are two boxes detected, perhaps one for the dog and one for the cat.</p>
<p>Then, you can analyze each box either in a loop, or manually. Let’s get the first one:</p>
<pre><code>box = result.boxes[0]
</code></pre>
<p>The <a href="https://docs.ultralytics.com/modes/predict/#boxes">box</a> object contains the properties of the bounding box, including:</p>
<ul>
<li><code>xyxy</code> - the coordinates of the box as an array [x1,y1,x2,y2]</li>
<li><code>cls</code> - the ID of object type</li>
<li><code>conf</code> - the confidence level of the model about this object. If it’s very low, like &lt; 0.5, then you can just ignore the box.</li>
</ul>
<p>Let’s print information about the detected box:</p>
<pre><code>print(&quot;Object type:&quot;, box.cls)
print(&quot;Coordinates:&quot;, box.xyxy)
print(&quot;Probability:&quot;, box.conf)
</code></pre>
<p>For the first box, you will receive the following information:</p>
<pre><code>Object type: tensor([16.])
Coordinates: tensor([[261.1901,  94.3429, 460.5649, 312.9910]])
Probability: tensor([0.9528])
</code></pre>
<p>As written above, YOLOv8 contains PyTorch models. The outputs from PyTorch models are encoded as an array of PyTorch <a href="https://pytorch.org/docs/stable/tensors.html">Tensor</a> objects, so you need to extract the first item from each of these arrays:</p>
<pre><code>print(&quot;Object type:&quot;,box.cls[0])
print(&quot;Coordinates:&quot;,box.xyxy[0])
print(&quot;Probability:&quot;,box.conf[0])
</code></pre>
<pre><code>Object type: tensor(16.)
Coordinates: tensor([261.1901,  94.3429, 460.5649, 312.9910])
Probability: tensor(0.9528)
</code></pre>
<p>Now you see the data as <code>Tensor</code> objects. To unpack actual values from Tensor, you need to use <code>.tolist()</code> method for tensor with array inside and <code>.item()</code> method for tensors with scalar values. Let’s extract the data to appropriate variables:</p>
<pre><code>cords = box.xyxy[0].tolist()
class_id = box.cls[0].item()
conf = box.conf[0].item()
print(&quot;Object type:&quot;, class_id)
print(&quot;Coordinates:&quot;, cords)
print(&quot;Probability:&quot;, conf)
</code></pre>
<pre><code>Object type: 16.0
Coordinates: [261.1900634765625, 94.3428955078125, 460.5649108886719, 312.9909973144531]
Probability: 0.9528293609619141
</code></pre>
<p>Now you see the actual data. The coordinates can be rounded, the probability also can be rounded to two digits after the dot.</p>
<p>The object type is <code>16</code> here. What does it mean? Let’s talk more about that. All objects, that the neural network can detect, have numeric IDs. In case of YOLOv8 pretrained model, there are 80 object types with IDs from 0 to 79. The COCO object classes are well known and can be easily googled on the Internet. In addition, the YOLOv8 result object contains the convenient <code>names</code> property to get these classes:</p>
<pre><code>print(result.names)
</code></pre>
<pre><code>{0: 'person',
 1: 'bicycle',
 2: 'car',
 3: 'motorcycle',
 4: 'airplane',
 5: 'bus',
 6: 'train',
 7: 'truck',
 8: 'boat',
 9: 'traffic light',
 10: 'fire hydrant',
 11: 'stop sign',
 12: 'parking meter',
 13: 'bench',
 14: 'bird',
 15: 'cat',
 16: 'dog',
 17: 'horse',
 18: 'sheep',
 19: 'cow',
 20: 'elephant',
 21: 'bear',
 22: 'zebra',
 23: 'giraffe',
 24: 'backpack',
 25: 'umbrella',
 26: 'handbag',
 27: 'tie',
 28: 'suitcase',
 29: 'frisbee',
 30: 'skis',
 31: 'snowboard',
 32: 'sports ball',
 33: 'kite',
 34: 'baseball bat',
 35: 'baseball glove',
 36: 'skateboard',
 37: 'surfboard',
 38: 'tennis racket',
 39: 'bottle',
 40: 'wine glass',
 41: 'cup',
 42: 'fork',
 43: 'knife',
 44: 'spoon',
 45: 'bowl',
 46: 'banana',
 47: 'apple',
 48: 'sandwich',
 49: 'orange',
 50: 'broccoli',
 51: 'carrot',
 52: 'hot dog',
 53: 'pizza',
 54: 'donut',
 55: 'cake',
 56: 'chair',
 57: 'couch',
 58: 'potted plant',
 59: 'bed',
 60: 'dining table',
 61: 'toilet',
 62: 'tv',
 63: 'laptop',
 64: 'mouse',
 65: 'remote',
 66: 'keyboard',
 67: 'cell phone',
 68: 'microwave',
 69: 'oven',
 70: 'toaster',
 71: 'sink',
 72: 'refrigerator',
 73: 'book',
 74: 'clock',
 75: 'vase',
 76: 'scissors',
 77: 'teddy bear',
 78: 'hair drier',
 79: 'toothbrush'}
</code></pre>
<p>Here it is: everything that this model can detect. Now you can see that <code>16</code> is “dog”, so this bounding box is the bounding box for the detected dog. Let’s modify the output to show the results in a more representative way:</p>
<pre><code>cords = box.xyxy[0].tolist()
cords = [round(x) for x in cords]
class_id = result.names[box.cls[0].item()]
conf = round(box.conf[0].item(), 2)
print(&quot;Object type:&quot;, class_id)
print(&quot;Coordinates:&quot;, cords)
print(&quot;Probability:&quot;, conf)
</code></pre>
<p>In this code I rounded all coordinates using the Python <a href="https://www.w3schools.com/python/python_lists_comprehension.asp">list comprehensions</a>, then, I got the name of detected object class by ID, using the <code>result.names</code> dictionary and also rounded the confidence. Finally, you should get the following output:</p>
<pre><code>Object type: dog
Coordinates: [261, 94, 461, 313]
Probability: 0.95
</code></pre>
<p>This data is good enough to show in the user interface. Let’s now write a code to get this information for all detected boxes in a loop:</p>
<pre><code>for box in result.boxes:
  class_id = result.names[box.cls[0].item()]
  cords = box.xyxy[0].tolist()
  cords = [round(x) for x in cords]
  conf = round(box.conf[0].item(), 2)
  print(&quot;Object type:&quot;, class_id)
  print(&quot;Coordinates:&quot;, cords)
  print(&quot;Probability:&quot;, conf)
  print(&quot;---&quot;)
</code></pre>
<p>This code will do the same for each box and will output the following:</p>
<pre><code>Object type: dog
Coordinates: [261, 94, 461, 313]
Probability: 0.95
---
Object type: cat
Coordinates: [140, 170, 256, 316]
Probability: 0.92
---
</code></pre>
<p>This way you can play with other images and see everything, that COCO-trained model can detect on them.</p>
<p>Also, if you like, you can rewrite the same code in a functional style, using list comprehensions:</p>
<pre><code>def print_box(box):
    class_id, cords, conf = box
    print(&quot;Object type:&quot;, class_id)
    print(&quot;Coordinates:&quot;, cords)
    print(&quot;Probability:&quot;, conf)
    print(&quot;---&quot;)

[
    print_box([
        result.names[box.cls[0].item()],
        [round(x) for x in box.xyxy[0].tolist()],
        round(box.conf[0].item(), 2)
    ]) for box in result.boxes
]
</code></pre>
<p>This video shows the whole coding session of this chapter in Jupyter Notebook, assuming that it’s <a href="https://jupyter.org/install">installed</a>.</p>
<p>Using the models pretrained on well-known objects is ok to start, but in practice, you may need a solution to detect specific objects for a concrete business problem.</p>
<p>For example, someone may need to detect specific products on supermarket shelves or discover brain tumors on x-rays. It’s highly likely that this information is not available in public datasets, and there are no free models that know about everything.</p>
<p>So, you have to teach your own model to detect these types of objects. To do that, you need to create a database of annotated images for your problem and train the model on these images.</p>
<p><a href=""></a></p>
<h1 id="how-to-prepare-data-to-train-the-yolov8-model"><a class="header" href="#how-to-prepare-data-to-train-the-yolov8-model">How to prepare data to train the YOLOv8 model</a></h1>
<p>To train the model, you need to prepare annotated images and split them to training and validation datasets. The training set will be used to teach the model and the validation set will be used to test the results of this study, to measure the quality of the trained model. You can put 80% of images to the training set and 20% to the validation set.</p>
<p>These are the steps that you need to follow to create each of the datasets:</p>
<ol>
<li>Decide and encode classes of objects you want to teach your model to detect. For example, if you want to detect only cats and dogs, then you can state that “0” is cat and “1” is dog.</li>
<li>Create a folder for your dataset and two subfolders in it: “images” and “labels”.</li>
<li>Put the images to the “images” subfolder. The more images you collect, the better for training.</li>
<li>For each image, create an annotation text file in the “labels” subfolder. Annotation text files should have the same names as image files and the “.txt” extensions. In annotation file you should add records about each object, that exist on the appropriate image in the following format:</li>
</ol>
<pre><code>{object_class_id} {x_center} {y_center} {width} {height}
</code></pre>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--7EeI3gIK--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/09nf4zkcet5g05i6po5u.png"><img src="https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/c1b9d6c7c5284064a67ff6b3b3818ac6~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.image#?w=400&amp;h=507&amp;s=102903&amp;e=png&amp;b=74924e" alt="Image description" /></a></p>
<p>Actually, this is the most time-consuming manual work in a machine learning process: to measure bounding boxes for all objects and add them to annotation files. Moreover, coordinates should be <strong>normalized</strong> to fit in a range from 0 to 1. To calculate them, you need to use the following formulas:</p>
<p>x_center = (box_x_left+box_x_width/2)/image_width<br />
y_center = (box_y_top+box_height/2)/image_height<br />
width = box_width/image_width<br />
height = box_height/image_height</p>
<p>For example, if you want to add the “cat_dog.jpg” image that we used before to the dataset, you need to copy it to the “images” folder and then measure and collect the following data about the image, and it’s bounding boxes:</p>
<p><strong>Image:</strong></p>
<p>image_width = 612<br />
image_height = 415</p>
<p><strong>Objects:</strong></p>
<div class="table-wrapper"><table><thead><tr><th></th><th></th></tr></thead><tbody>
<tr><td><strong>Dog</strong></td><td><strong>Cat</strong></td></tr>
<tr><td>box_x_left=261 box_x_top=94 box_width=200 box_height=219</td><td>box_x_left=140 box_x_top=170 box_width=116 box_height=146</td></tr>
</tbody></table>
</div>
<p>Then, create the “cat_dog.txt” file in the “labels” folder and, using the formulas above, calculate the coordinates:</p>
<p>Dog (class id=1):</p>
<p>x_center = (261+200/2)/612 = 0.589869281<br />
y_center = (94+219/2)/415 = 0.490361446<br />
width = 200/612 = 0.326797386<br />
height = 219/415 = 0.527710843</p>
<p>Cat (class id=0)</p>
<p>x_center = (140+116/2)/612 = 0.323529412<br />
y_center = (170+146/2)/415 = 0.585542169<br />
width = 116/612 = 0.189542484<br />
height = 146/415 = 0.351807229</p>
<p>and add the following lines to the file:</p>
<pre><code>1 0.589869281 0.490361446 0.326797386 0.527710843
0 0.323529412 0.585542169 0.189542484 0.351807229
</code></pre>
<p>The first line contains a bounding box for the dog (class id=1), the second line contains a bounding box for the cat (class id=0). Of course, you can have the image with many dogs and many cats at the same time, and you can add bounding boxes for all of them.</p>
<p>After adding and annotating all images, the dataset is ready. You need to create two datasets and place them in different folders. The final folder structure can look like this:</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--lZqfY3F3--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/7obu30iswcnm9hb8sk93.png"><img src="https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/1350b02854054a4cba50acc4ca867765~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.image#?w=140&amp;h=176&amp;s=1593&amp;e=png&amp;b=505050" alt="Image description" /></a></p>
<p>Here the training dataset located in the “train” folder and the validation dataset located in the “val” folder.</p>
<p>Finally, you need to create a dataset descriptor YAML-file, that points to created datasets and describes the object classes in them. This is a sample of this file for the data, created above:</p>
<pre><code>train: ../train/images
val: ../val/images

nc: 2
names: ['cat','dog']
</code></pre>
<p>In the first two lines, you need to specify paths to the images of the training and the validation datasets. The paths can be either relative to the current folder or absolute. Then, the <code>nc</code> line specifies the <strong>n</strong>umber of <strong>c</strong>lasses that exist in these datasets and the <code>names</code> is an array of class names in correct order. Indexes of these items are numbers that you used when annotated the images, and these indexes will be returned by the model when detect objects using the <code>predict</code> method. So, if you used “0” for cats, then it should be the first item in the <code>names</code> array.</p>
<p>This YAML file should be passed to the <code>train</code> method of the model to start a training process.</p>
<p>To make this process easier, many programs exist to visually annotate images for machine learning. You can ask a search engine something like “software to annotate images for machine learning” to get a list of them. There are also many online tools that can do all this work. One of the great online tools for this is <a href="https://roboflow.com/annotate">Roboflow Annotate</a>. Using this service, you just need to upload your images, draw bounding boxes on them, and set a class for each bounding box. Then, the tool will automatically create annotation files, split your data into training and validation datasets, create a YAML descriptor file, and let you export and download the annotated data as a ZIP file.</p>
<p>In the next video, I show how to use the Roboflow to create the “cats and dogs” micro-dataset.</p>
<p>For real life problems, that database should be much bigger. To train a good model, you should have hundreds or thousands of annotated images.</p>
<p>Also, when preparing the image dataset, try to make it balanced. It should have an equal number of objects of each class, e.g. an equal number of dogs and cats. Otherwise, a model trained on it could predict one class better than another.</p>
<p>After the data is ready, copy it to the folder with your Python code, that you will use for training and return back to your Jupyter Notebook to start the training process.</p>
<p><a href=""></a></p>
<h1 id="how-to-train-the-yolov8-model"><a class="header" href="#how-to-train-the-yolov8-model">How to train the YOLOv8 model</a></h1>
<p>After the data is ready, you need to pass it through the model. To make it more interesting, we will not use this small “cats and dogs” dataset. We will use other custom dataset for training. It contains traffic lights and road signs. This is free dataset that I got from the Roboflow Universe: <a href="https://universe.roboflow.com/roboflow-100/road-signs-6ih4y">https://universe.roboflow.com/roboflow-100/road-signs-6ih4y</a>. Press “Download Dataset” and select the “YOLOv8” as a format.</p>
<p>If it is not available on Roboflow when you read these lines, you can get it from <a href="https://drive.google.com/file/d/1PNktsghBqIJVgxa-34FqO3yODNJbH3B0/view?usp=sharing">my Google Drive</a>. This dataset can be used to teach YOLOv8 to detect different objects on roads, as displayed on the next screenshot.</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--xsA9xwIj--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/8vmnnu8lcvawt7gxndjy.png"><img src="https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/784918db281a438f87ba287786293462~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.image#?w=640&amp;h=640&amp;s=41345&amp;e=jpg&amp;b=39341c" alt="Image description" /></a></p>
<p>You can open the downloaded ZIP file and ensure that it is structured using the rules described above. You can find the dataset descriptor file <code>data.yaml</code> in the archive as well.</p>
<p>If you downloaded the archive from the Roboflow, it will contain the additional “test” dataset, which is not used by the training process. You can use the images from it for additional testing on your own after training.</p>
<p>Extract the archive to the folder with your Python code and execute the <code>train</code> method to start a training loop:</p>
<pre><code>model.train(data=&quot;data.yaml&quot;, epochs=30)
</code></pre>
<p>The <code>data</code> is the only required option. You have to pass the YAML descriptor file to it. The <code>epochs</code> option specifies the number of training cycles (100 by default). There are other <a href="https://docs.ultralytics.com/modes/train/#arguments">options</a>, that can affect the process and quality of trained model.</p>
<p>Each training cycle consists of two phases: training phase and validation phase.</p>
<p>On the training phase, the <code>train</code> method does the following:</p>
<ul>
<li>Extracts the random batch of images from the training dataset (the number of images in the batch can be specified using the <code>batch</code> option).</li>
<li>Passes these images through the model and receives the resulting bounding boxes of all detected objects and their classes.</li>
<li>Passes the result to the loss function, that used to compare the received output with correct result from annotation files for these images. The loss function calculates the amount of error.</li>
<li>The result of loss function passed to the <code>optimizer</code> to adjust the model weights based on the amount of error in correct direction to reduce the error in the next cycle. By default, the <a href="https://towardsdatascience.com/stochastic-gradient-descent-clearly-explained-53d239905d31">SGD (Stochastic Gradient Descent)</a> optimizer used, but you can try others, like <a href="https://www.linkedin.com/pulse/understanding-adam-optimizer-gradient-descent-evan-dunbar/">Adam</a> to see the difference.</li>
</ul>
<p>On the validation phase, the <code>train</code> does the following:</p>
<ul>
<li>Extracts the images from the validation dataset.</li>
<li>Passes them through the model and receives the detected bounding boxes for these images.</li>
<li>Compares the received result with true values for these images from annotation text files.</li>
<li>Calculates the precision of the model based on the difference between actual and expected results.</li>
</ul>
<p>The progress and results of each phase for each epoch are displayed on the screen. This way you can see how the model learns and improves from epoch to epoch.</p>
<p>When you run the <code>train</code> code, you will see output similar to the following during the training loop:</p>
<p><a href="https://res.cloudinary.com/practicaldev/image/fetch/s--yAScE4ce--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/u893m4q54pr7kkqkgdqr.png"><img src="https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/fc28b76804d64571a6873d8440bc049d~tplv-k3u1fbpfcp-jj-mark:0:0:0:0:q75.image#?w=800&amp;h=177&amp;s=20548&amp;e=png&amp;b=fcfcfc" alt="Image description" /></a></p>
<p>For each epoch it shows a summary for both the training and validation phases: lines 1 and 2 show the results of the training phase, and lines 3 and 4 show the results of the validation phase for each epoch.</p>
<p>The training phase includes calculation of the amount of error in a loss function, so, the most valuable metrics here are <code>box_loss</code> and <code>cls_loss</code>.</p>
<ul>
<li><code>box_loss</code> shows the amount of error in detected bounding boxes.</li>
<li><code>cls_loss</code> shows the amount of error in detected object classes.</li>
</ul>
<p>Why is the loss split into several metrics? Because the model could correctly detect the bounding box around the object, but incorrectly detect the object class in this box. For example, in my practice, it detected a dog as a horse, but the dimensions of the object were detected correctly.</p>
<p>If the model really learns something from the data, then you should see these values decrease from epoch to epoch. On the previous screenshot the <code>box_loss</code> decreases: 0.7751, 0.7473, 0.742, and the <code>cls_loss</code> decreases too: 0.702, 0.6422, 0.6211.</p>
<p>On the validation phase, it calculates the quality of the model after training using the images from the validation dataset. The most valuable quality metric is mAP50-95, which is a <a href="https://www.v7labs.com/blog/mean-average-precision">Mean Average Precision</a>. If the model learns and improves, the precision should grow from epoch to epoch. On previous screenshot it slowly grows: 0.788, 0.788, 0.791.</p>
<p>If after the last epoch you did not get acceptable precision, you can increase the number of epochs and run the training again. Also, you can tune other parameters like <code>batch</code>, <code>lr0</code>, <code>lrf</code> or change the <code>optimizer</code> used. There are no clear rules about what to do here — there are enough recommendations to fill a book. In a few words, you need to experiment and compare results.</p>
<p>In addition to these metrics, the <code>train</code> method writes a lot of statistics to disk during its work. When training starts, it creates the <code>runs/detect/train</code> subfolder in the current folder and, after each epoch, writes various log files to it.</p>
<p>Furthermore, it exports the trained model after each epoch to the <code>/runs/detect/train/weights/last.pt</code> file and the model with the highest precision to the <code>/runs/detect/train/weights/best.pt</code> file. So, after training finished, you can get the <code>best.pt</code> file to use in production.</p>
<p>Watch this video to see how the training process works. I used the <a href="https://colab.research.google.com/">Google Colab</a> which is a cloud version of Jupyter Notebook to get access to hardware with more powerful GPU to speed up the training process. The video shows how to train the model on 5 epochs and download the final <code>best.pt</code> model. In real world problems, you need to run much more epochs and be prepared to wait hours or maybe days until training finishes.</p>
<p>After it has finished, it’s time to run the trained model in production. In the next section, we will create a web service to detect objects on images online in a web browser.</p>
<p><a href=""></a></p>
<h1 id="how-to-create-an-object-detection-web-service"><a class="header" href="#how-to-create-an-object-detection-web-service"><a href="#how-to-create-an-object-detection-web-service"></a>How to create an object detection web service</a></h1>
<p>This is a moment when we finish experiments with the model in the Jupyter Notebook. Next code you need to write as a separate project, using any Python IDE, like <a href="https://code.visualstudio.com/">VS Code</a> or <a href="https://www.jetbrains.com/pycharm/">PyCharm</a>💚.</p>
<p>The web service that we are going to create will have a web page with a file input field and an HTML5 canvas element. When the user selects an image file using the input field, the interface will send it to the backend. Then, the backend will pass the image through the model that we created and trained and return the array of detected bounding boxes to the web page. When it receives this, the frontend will draw the image on the canvas element and the detected bounding boxes on top of it. The service will look and work as demonstrated on this video:</p>
<p>On the video, I used the model trained on 30 epochs, and it still does not detect some traffic lights. You can try to train it more to get better results. However, the best way to improve the quality of machine learning is adding more and more data. So, as an additional practice, you can import the dataset folder to the Roboflow, then add and annotate more images to it and then use the updated data to continue training the model.</p>
<p><a href=""></a></p>
<h2 id="how-to-create-a-frontend"><a class="header" href="#how-to-create-a-frontend"><a href="#how-to-create-a-frontend"></a>How to create a frontend</a></h2>
<p>To start with, create a folder for a new Python project and the <code>index.html</code> file in it for the frontend web page. Here is a content of this file</p>
<pre><code>&lt;!DOCTYPE html&gt;
&lt;html lang=&quot;en&quot;&gt;
&lt;head&gt;
    &lt;meta charset=&quot;UTF-8&quot;&gt;
    &lt;title&gt;YOLOv8 Object Detection&lt;/title&gt;
    &lt;style&gt;
        canvas {
            display:block;
            border: 1px solid black;
            margin-top:10px;
        }
    &lt;/style&gt;
&lt;/head&gt;
&lt;body&gt;
    &lt;input id=&quot;uploadInput&quot; type=&quot;file&quot;/&gt;
    &lt;canvas&gt;&lt;/canvas&gt;
    &lt;script&gt;
       /**
       * &quot;Upload&quot; button onClick handler: uploads selected 
       * image file to backend, receives an array of
       * detected objects and draws them on top of image
       */
       const input = document.getElementById(&quot;uploadInput&quot;);
       input.addEventListener(&quot;change&quot;,async(event) =&gt; {
           const file = event.target.files[0];
           const data = new FormData();
           data.append(&quot;image_file&quot;,file,&quot;image_file&quot;);
           const response = await fetch(&quot;/detect&quot;,{
               method:&quot;post&quot;,
               body:data
           });
           const boxes = await response.json();
           draw_image_and_boxes(file,boxes);
       })

       /**
       * Function draws the image from provided file
       * and bounding boxes of detected objects on
       * top of the image
       * @param file Uploaded file object
       * @param boxes Array of bounding boxes in format
         [[x1,y1,x2,y2,object_type,probability],...]
       */
       function draw_image_and_boxes(file,boxes) {
          const img = new Image()
          img.src = URL.createObjectURL(file);
          img.onload = () =&gt; {
              const canvas = document.querySelector(&quot;canvas&quot;);
              canvas.width = img.width;
              canvas.height = img.height;
              const ctx = canvas.getContext(&quot;2d&quot;);
              ctx.drawImage(img,0,0);
              ctx.strokeStyle = &quot;#00FF00&quot;;
              ctx.lineWidth = 3;
              ctx.font = &quot;18px serif&quot;;
              boxes.forEach(([x1,y1,x2,y2,label]) =&gt; {
                  ctx.strokeRect(x1,y1,x2-x1,y2-y1);
                  ctx.fillStyle = &quot;#00ff00&quot;;
                  const width = ctx.measureText(label).width;
                  ctx.fillRect(x1,y1,width+10,25);
                  ctx.fillStyle = &quot;#000000&quot;;
                  ctx.fillText(label,x1,y1+18);
              });
          }
       }
  &lt;/script&gt;  
&lt;/body&gt;
&lt;/html&gt;
</code></pre>
<p>The HTML part is very tiny and consists only of the file input field with the “uploadInput” ID and the canvas element below it. Then, in the JavaScript part, we define an “onChange” event handler for the input field. When the user selects an image file, the handler uses <code>fetch</code> to make a POST request to the <code>/detect</code> backend endpoint (which we will create later) and sends this image file to it.</p>
<p>The backend should detect objects on this image and return a response with a <code>boxes</code> array as JSON. This response is then decoded and passed to the “draw_image_and_boxes” function along with the image file itself.</p>
<p>The “draw_image_and_boxes” function loads the image from the file and, as soon as it has loaded, draws it on the canvas. Then, it draws each bounding box with its class label on top of the canvas with the image.</p>
<p>So, now let’s create a backend with <code>/detect</code> endpoint for it.</p>
<p><a href=""></a></p>
<h2 id="how-to-create-a-backend"><a class="header" href="#how-to-create-a-backend"><a href="#how-to-create-a-backend"></a>How to create a backend</a></h2>
<p>We will create the backend using <a href="https://flask.palletsprojects.com/en/2.2.x/">Flask</a>. Flask has its own internal web server but, as stated by the Flask developers, it’s not reliable enough for production, so we will use the <a href="https://flask.palletsprojects.com/en/2.2.x/deploying/waitress/">Waitress</a> web server to run the Flask app in it.</p>
<p>Also, we will use the <a href="https://pillow.readthedocs.io/en/stable/">Pillow</a> library to read an uploaded binary file as an image. Ensure that all packages are installed on your system before continuing:</p>
<pre><code>pip3 install flask
pip3 install waitress
pip3 install pillow
</code></pre>
<p>The backend will be in a single file. Let’s name it <code>object_detector.py</code>:</p>
<pre><code>from ultralytics import YOLO
from flask import request, Flask, jsonify
from waitress import serve
from PIL import Image
import json

app = Flask(__name__)

@app.route(&quot;/&quot;)
def root():
    &quot;&quot;&quot;
    Site main page handler function.
    :return: Content of index.html file
    &quot;&quot;&quot;
    with open(&quot;index.html&quot;) as file:
        return file.read()


@app.route(&quot;/detect&quot;, methods=[&quot;POST&quot;])
def detect():
    &quot;&quot;&quot;
        Handler of /detect POST endpoint
        Receives uploaded file with a name &quot;image_file&quot;, 
        passes it through YOLOv8 object detection 
        network and returns an array of bounding boxes.
        :return: a JSON array of objects bounding 
        boxes in format 
        [[x1,y1,x2,y2,object_type,probability],..]
    &quot;&quot;&quot;
    buf = request.files[&quot;image_file&quot;]
    boxes = detect_objects_on_image(Image.open(buf.stream))
    return jsonify(boxes)    


def detect_objects_on_image(buf):
    &quot;&quot;&quot;
    Function receives an image,
    passes it through YOLOv8 neural network
    and returns an array of detected objects
    and their bounding boxes
    :param buf: Input image file stream
    :return: Array of bounding boxes in format 
    [[x1,y1,x2,y2,object_type,probability],..]
    &quot;&quot;&quot;
    model = YOLO(&quot;best.pt&quot;)
    results = model.predict(buf)
    result = results[0]
    output = []
    for box in result.boxes:
        x1, y1, x2, y2 = [
          round(x) for x in box.xyxy[0].tolist()
        ]
        class_id = box.cls[0].item()
        prob = round(box.conf[0].item(), 2)
        output.append([
          x1, y1, x2, y2, result.names[class_id], prob
        ])
    return output

serve(app, host='0.0.0.0', port=8080)
</code></pre>
<p>First, we import the required libraries:</p>
<ul>
<li><a href="https://github.com/ultralytics/ultralytics">ultralytics</a> for the YOLOv8 model.</li>
<li><a href="https://flask.palletsprojects.com/en/2.2.x/">flask</a> to create a <code>Flask</code> web application, to receive <code>requests</code> from the frontend and to send <code>responses</code> back to it. Also, <code>jsonify</code> is imported to convert the result to JSON.</li>
<li><a href="https://flask.palletsprojects.com/en/2.2.x/deploying/waitress/">waitress</a> to run a web server and <code>serve</code> the Flask web <code>app</code> in it.</li>
<li><a href="https://pillow.readthedocs.io/en/stable/">PIL</a> to load an uploaded file as an <code>Image</code> object, that required for YOLOv8.</li>
</ul>
<p>Then, we define two routes:</p>
<ul>
<li><code>/</code> that serves as a root of web service. It just returns a content of the “index.html” file.</li>
<li><code>/detect</code> that responds to an image upload requests from frontend. It converts the RAW file to the Pillow Image object, then, passes this image to the <code>detect_objects_on_image</code> function.</li>
</ul>
<p>The <code>detect_objects_on_image</code> function creates a model object, based on the <code>best.pt</code> model, that we trained in the previous section. Ensure that this file exists in the folder, where you write the code.</p>
<p>Then it calls the <code>predict</code> method for the image. The <code>predict</code> returns the detected bounding boxes. Then for each box it extracts the coordinates, class name and probability in a way, as we did in the beginning of the tutorial, and adds this info to the output array. Finally, the function returns the array of detected object coordinates and their classes.</p>
<p>After this, the array is encoded to JSON and returned to the frontend.</p>
<p>Finally, the last line of code starts the web server on port 8080, that serves the <code>app</code> Flask application.</p>
<p>To run the service, execute the following command:</p>
<pre><code>python3 object_detector.py
</code></pre>
<p>If the code is written without mistakes and all dependencies are installed, you can open <code>http://localhost:8080</code> in a web browser. It should show the <code>index.html</code> page. When you select any image file, it will process it and display bounding boxes around all detected objects (or just display the image if nothing is detected on it).</p>
<p>The web service we just created is universal. You can use it with any YOLOv8 model. Now it detects traffic lights and road signs, using the <code>best.pt</code> model we created. However, you can change it to use another model, like the <code>yolov8m.pt</code> model used earlier to detect cats, dogs and the other object classes that pretrained YOLOv8 models can detect.</p>
<p><a href=""></a></p>
<h1 id="conclusion"><a class="header" href="#conclusion"><a href="#conclusion"></a>Conclusion</a></h1>
<p>In this tutorial, I guided you through the process of creating an AI-powered web application that uses YOLOv8 — the state-of-the-art convolutional neural network for object detection. We covered such steps as creating models, using the pretrained models, preparing the data to train custom models, and finally creating a web application with frontend and backend that uses the custom-trained YOLOv8 model to detect traffic lights and road signs.</p>
<p>You can find a source code of this app in this GitHub repository: <a href="https://github.com/AndreyGermanov/yolov8_pytorch_python">https://github.com/AndreyGermanov/yolov8_pytorch_python</a></p>
<p>For all this work, we used the Ultralytics high-level APIs, provided with the YOLOv8 package by default. These APIs are based on the PyTorch framework, which is used to create the bigger part of neural networks today. It’s quite convenient on the one hand, but dependence on these high-level APIs has a negative effect as well. If you need to run this web app in production, you have to install this whole environment there, including Python, PyTorch and many other dependencies. To run this on a clean new server, you’ll need to download and install more than 1 GB of third-party libraries! This is definitely not the way to go. Also, what if you do not have Python in your production environment? What if all your other code is written in another programming language, and you do not plan to use Python? Or what if you want to run the model on a mobile phone on Android or iOS?</p>
<p>Using the Ultralytics packages is great for experimenting, training and preparing the models for production. However, in production itself, you should get rid of these high-level APIs. You have to load and use the model directly. To do this, you need to understand how the YOLOv8 neural network works under the hood and write more code to provide input to the model and to process the output from it. As a reward, you will get an opportunity to make your apps tiny and fast, and you will not need to have PyTorch installed to run them. Furthermore, you will be able to run your models even without Python, using many other programming languages, including Julia, C++, and Node.js on the backend, or even without a backend at all. You can run the YOLOv8 models right in a browser, using only JavaScript on the frontend. Want to know how? This will be in the next article of my YOLOv8 series. Follow me to know first when it is published.</p>

                    </main>

                    <nav class="nav-wrapper" aria-label="Page navigation">
                        <!-- Mobile navigation buttons -->
                            <a rel="prev" href="../../posts/python/sanctum_model_yaml.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
                                <i class="fa fa-angle-left"></i>
                            </a>

                            <a rel="next" href="../../posts/python/use_huggingface_model.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
                                <i class="fa fa-angle-right"></i>
                            </a>

                        <div style="clear: both"></div>
                    </nav>
                </div>
            </div>

            <nav class="nav-wide-wrapper" aria-label="Page navigation">
                    <a rel="prev" href="../../posts/python/sanctum_model_yaml.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
                        <i class="fa fa-angle-left"></i>
                    </a>

                    <a rel="next" href="../../posts/python/use_huggingface_model.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
                        <i class="fa fa-angle-right"></i>
                    </a>
            </nav>

        </div>



        <script>
            window.playground_line_numbers = true;
        </script>

        <script>
            window.playground_copyable = true;
        </script>

        <script src="../../ace.js"></script>
        <script src="../../editor.js"></script>
        <script src="../../mode-rust.js"></script>
        <script src="../../theme-dawn.js"></script>
        <script src="../../theme-tomorrow_night.js"></script>

        <script src="../../elasticlunr.min.js"></script>
        <script src="../../mark.min.js"></script>
        <script src="../../searcher.js"></script>

        <script src="../../clipboard.min.js"></script>
        <script src="../../highlight.js"></script>
        <script src="../../book.js"></script>

        <!-- Custom JS scripts -->
        <script src="../../src/js/custom.js"></script>


    </div>
    </body>
</html>
