{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "name": "YOLOv5 Tutorial", "provenance": [], "collapsed_sections": [], "include_colab_link": true }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { "484511f272e64eab8b42e68dac5f7a66": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_view_name": "HBoxView", "_dom_classes": [], "_model_name": "HBoxModel", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", "layout": "IPY_MODEL_78cceec059784f2bb36988d3336e4d56", "_model_module": "@jupyter-widgets/controls", "children": [ "IPY_MODEL_ab93d8b65c134605934ff9ec5efb1bb6", "IPY_MODEL_30df865ded4c434191bce772c9a82f3a", "IPY_MODEL_20cdc61eb3404f42a12b37901b0d85fb" ] } }, "78cceec059784f2bb36988d3336e4d56": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, "right": null, "justify_content": null, "_view_module": "@jupyter-widgets/base", "overflow": null, "_model_module_version": "1.2.0", "_view_count": null, "flex_flow": null, "width": null, "min_width": null, "border": null, "align_items": null, "bottom": null, "_model_module": "@jupyter-widgets/base", "top": null, "grid_column": null, "overflow_y": null, "overflow_x": null, "grid_auto_flow": null, "grid_area": null, "grid_template_columns": null, "flex": null, "_model_name": "LayoutModel", "justify_items": null, "grid_row": null, "max_height": null, "align_content": null, "visibility": null, "align_self": null, "height": null, "min_height": null, "padding": null, "grid_auto_rows": null, "grid_gap": null, "max_width": null, "order": null, "_view_module_version": "1.2.0", "grid_template_areas": null, "object_position": null, "object_fit": null, "grid_auto_columns": null, "margin": null, "display": null, "left": null } }, "ab93d8b65c134605934ff9ec5efb1bb6": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_view_name": "HTMLView", "style": "IPY_MODEL_2d7239993a9645b09b221405ac682743", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "value": "100%", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", "layout": "IPY_MODEL_17b5a87f92104ec7ab96bf507637d0d2" } }, "30df865ded4c434191bce772c9a82f3a": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_view_name": "ProgressView", "style": "IPY_MODEL_2358bfb2270247359e94b066b3cc3d1f", "_dom_classes": [], "description": "", "_model_name": "FloatProgressModel", "bar_style": "success", "max": 818322941, "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "value": 818322941, "_view_count": null, "_view_module_version": "1.5.0", "orientation": "horizontal", "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", "layout": "IPY_MODEL_3e984405db654b0b83b88b2db08baffd" } }, "20cdc61eb3404f42a12b37901b0d85fb": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { 
"_view_name": "HTMLView", "style": "IPY_MODEL_654d8a19b9f949c6bbdaf8b0875c931e", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "value": " 780M/780M [00:33<00:00, 24.4MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", "layout": "IPY_MODEL_896030c5d13b415aaa05032818d81a6e" } }, "2d7239993a9645b09b221405ac682743": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "DescriptionStyleModel", "description_width": "", "_view_module": "@jupyter-widgets/base", "_model_module_version": "1.5.0", "_view_count": null, "_view_module_version": "1.2.0", "_model_module": "@jupyter-widgets/controls" } }, "17b5a87f92104ec7ab96bf507637d0d2": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, "right": null, "justify_content": null, "_view_module": "@jupyter-widgets/base", "overflow": null, "_model_module_version": "1.2.0", "_view_count": null, "flex_flow": null, "width": null, "min_width": null, "border": null, "align_items": null, "bottom": null, "_model_module": "@jupyter-widgets/base", "top": null, "grid_column": null, "overflow_y": null, "overflow_x": null, "grid_auto_flow": null, "grid_area": null, "grid_template_columns": null, "flex": null, "_model_name": "LayoutModel", "justify_items": null, "grid_row": null, "max_height": null, "align_content": null, "visibility": null, "align_self": null, "height": null, "min_height": null, "padding": null, "grid_auto_rows": null, "grid_gap": null, "max_width": null, "order": null, "_view_module_version": "1.2.0", "grid_template_areas": null, "object_position": null, "object_fit": null, "grid_auto_columns": null, "margin": null, "display": null, "left": null } }, "2358bfb2270247359e94b066b3cc3d1f": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "ProgressStyleModel", "description_width": "", "_view_module": "@jupyter-widgets/base", "_model_module_version": "1.5.0", "_view_count": null, "_view_module_version": "1.2.0", "bar_color": null, "_model_module": "@jupyter-widgets/controls" } }, "3e984405db654b0b83b88b2db08baffd": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, "right": null, "justify_content": null, "_view_module": "@jupyter-widgets/base", "overflow": null, "_model_module_version": "1.2.0", "_view_count": null, "flex_flow": null, "width": null, "min_width": null, "border": null, "align_items": null, "bottom": null, "_model_module": "@jupyter-widgets/base", "top": null, "grid_column": null, "overflow_y": null, "overflow_x": null, "grid_auto_flow": null, "grid_area": null, "grid_template_columns": null, "flex": null, "_model_name": "LayoutModel", "justify_items": null, "grid_row": null, "max_height": null, "align_content": null, "visibility": null, "align_self": null, "height": null, "min_height": null, "padding": null, "grid_auto_rows": null, "grid_gap": null, "max_width": null, "order": null, "_view_module_version": "1.2.0", "grid_template_areas": null, "object_position": null, 
"object_fit": null, "grid_auto_columns": null, "margin": null, "display": null, "left": null } }, "654d8a19b9f949c6bbdaf8b0875c931e": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "DescriptionStyleModel", "description_width": "", "_view_module": "@jupyter-widgets/base", "_model_module_version": "1.5.0", "_view_count": null, "_view_module_version": "1.2.0", "_model_module": "@jupyter-widgets/controls" } }, "896030c5d13b415aaa05032818d81a6e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, "right": null, "justify_content": null, "_view_module": "@jupyter-widgets/base", "overflow": null, "_model_module_version": "1.2.0", "_view_count": null, "flex_flow": null, "width": null, "min_width": null, "border": null, "align_items": null, "bottom": null, "_model_module": "@jupyter-widgets/base", "top": null, "grid_column": null, "overflow_y": null, "overflow_x": null, "grid_auto_flow": null, "grid_area": null, "grid_template_columns": null, "flex": null, "_model_name": "LayoutModel", "justify_items": null, "grid_row": null, "max_height": null, "align_content": null, "visibility": null, "align_self": null, "height": null, "min_height": null, "padding": null, "grid_auto_rows": null, "grid_gap": null, "max_width": null, "order": null, "_view_module_version": "1.2.0", "grid_template_areas": null, "object_position": null, "object_fit": null, "grid_auto_columns": null, "margin": null, "display": null, "left": null } } } } }, "cells": [ { "cell_type": "markdown", "metadata": { "id": "view-in-github", "colab_type": "text" }, "source": [ "" ] }, { "cell_type": "markdown", "metadata": { "id": "t6MPjfT5NrKQ" }, "source": [ "\n", "\n", "\n", "This is the **official YOLOv5 🚀 notebook** by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" ] }, { "cell_type": "markdown", "metadata": { "id": "7mGmQbAO5pQb" }, "source": [ "# Setup\n", "\n", "Clone repo, install dependencies and check PyTorch and GPU." ] }, { "cell_type": "code", "metadata": { "id": "wbvMlHd_QwMG", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "4d67116a-43e9-4d84-d19e-1edd83f23a04" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", "%cd yolov5\n", "%pip install -qr requirements.txt # install dependencies\n", "\n", "import torch\n", "from IPython.display import Image, clear_output # to display images\n", "\n", "clear_output()\n", "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], "execution_count": null, "outputs": [ { "output_type": "stream", "text": [ "Setup complete. Using torch 1.9.0+cu102 (Tesla V100-SXM2-16GB)\n" ], "name": "stdout" } ] }, { "cell_type": "markdown", "metadata": { "id": "4JnkELT0cIJg" }, "source": [ "# 1. Inference\n", "\n", "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. 
Example inference sources for `detect.py` include:\n", "\n", "```shell\n", "python detect.py --source 0 # webcam\n", " file.jpg # image \n", " file.mp4 # video\n", " path/ # directory\n", " path/*.jpg # glob\n", " 'https://youtu.be/NUsoVlDFqZg' # YouTube\n", " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", "```" ] }, { "cell_type": "code", "metadata": { "id": "zR9ZbuQCH7FX", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "8b728908-81ab-4861-edb0-4d0c46c439fb" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", "Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], "execution_count": null, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images/, imgsz=640, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False\n", "YOLOv5 🚀 v5.0-367-g01cdb76 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients\n", "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 1 fire hydrant, Done. (0.007s)\n", "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.007s)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n", "Done. (0.091s)\n" ], "name": "stdout" } ] }, { "cell_type": "markdown", "metadata": { "id": "0eq1SMWl6Sfn" }, "source": [ "# 2. Validate\n", "Validate a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class, use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation." ] }, { "cell_type": "markdown", "metadata": { "id": "eyTZYGgRjnMc" }, "source": [ "## COCO val2017\n", "Download the [COCO val 2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L14) dataset (1 GB, 5000 images) and test model accuracy."
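, "\n", "Once the download cell below finishes, a quick sanity check of the extracted files can catch a bad download before the longer val run. A minimal sketch (assuming the standard `../datasets/coco` layout that `coco.yaml` expects):\n", "\n", "```python\n", "from pathlib import Path\n", "\n", "val_images = Path('../datasets/coco/images/val2017')  # location assumed by coco.yaml\n", "print(len(list(val_images.glob('*.jpg'))), 'images found')  # expect 5000\n", "```"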
] }, { "cell_type": "code", "metadata": { "id": "WQPtK1QYVaD_", "colab": { "base_uri": "https://localhost:8080/", "height": 48, "referenced_widgets": [ "484511f272e64eab8b42e68dac5f7a66", "78cceec059784f2bb36988d3336e4d56", "ab93d8b65c134605934ff9ec5efb1bb6", "30df865ded4c434191bce772c9a82f3a", "20cdc61eb3404f42a12b37901b0d85fb", "2d7239993a9645b09b221405ac682743", "17b5a87f92104ec7ab96bf507637d0d2", "2358bfb2270247359e94b066b3cc3d1f", "3e984405db654b0b83b88b2db08baffd", "654d8a19b9f949c6bbdaf8b0875c931e", "896030c5d13b415aaa05032818d81a6e" ] }, "outputId": "7e6f5c96-c819-43e1-cd03-d3b9878cf8de" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], "execution_count": null, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "484511f272e64eab8b42e68dac5f7a66", "version_minor": 0, "version_major": 2 }, "text/plain": [ " 0%| | 0.00/780M [00:00, ?B/s]" ] }, "metadata": { "tags": [] } } ] }, { "cell_type": "code", "metadata": { "id": "X58w8JLpMnjH", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "3dd0e2fc-aecf-4108-91b1-6392da1863cb" }, "source": [ "# Run YOLOv5x on COCO val2017\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], "execution_count": null, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True\n", "YOLOv5 🚀 v5.0-367-g01cdb76 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n", "100% 168M/168M [00:08<00:00, 20.6MB/s]\n", "\n", "Fusing layers... \n", "Model Summary: 476 layers, 87730285 parameters, 0 gradients\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2749.96it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../datasets/coco/val2017.cache\n", " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:08<00:00, 2.28it/s]\n", " all 5000 36335 0.746 0.626 0.68 0.49\n", "Speed: 0.1ms pre-process, 5.1ms inference, 1.6ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", "Done (t=0.46s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", "DONE (t=4.94s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", "DONE (t=83.60s).\n", "Accumulating evaluation results...\n", "DONE (t=13.22s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.644\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.629\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.681\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.827\n", "Results saved to \u001b[1mruns/val/exp\u001b[0m\n" ], "name": "stdout" } ] }, { "cell_type": "markdown", "metadata": { "id": "rc_KbFk0juX2" }, "source": [ "## COCO test-dev2017\n", "Download the [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7 GB, 40,000 images) to test model accuracy on the test-dev set (**20,000 images, no labels**). Results are saved to a `*.json` file, which should be **zipped** and submitted to the evaluation server at https://competitions.codalab.org/competitions/20794." ] }, { "cell_type": "code", "metadata": { "id": "V0AJnSeCIHyJ" }, "source": [ "# Download COCO test-dev2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip labels\n", "!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n", "%mv ./test2017 ../datasets/coco/images # move images into the ../datasets/coco layout expected by coco.yaml" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "29GJXAP_lPrt" }, "source": [ "# Run YOLOv5s on COCO test-dev2017 using --task test\n", "!python val.py --weights yolov5s.pt --data coco.yaml --task test" ], "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "ZY2VXXXu74w5" }, "source": [ "# 3. Train\n", "\n", "
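\n", "A minimal training sketch (assuming the Setup cell above has been run; `coco128.yaml` ships with the repo and auto-downloads the 128-image COCO128 tutorial dataset):\n", "\n", "```shell\n", "# train YOLOv5s from pretrained weights for 3 epochs at 640 pixels\n", "python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt\n", "```\n", "\n", "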
\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "