{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "source": [
        "## 1、convert_to_yolo.py\n",
        "### 将数据集转换为YOLO格式以用于训练"
      ],
      "metadata": {
        "id": "HMZS9PuGkaSy"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import os\n",
        "#导入os模块\n",
        "from datasets import load_dataset\n",
        "#从datasets模块中导入load_dataset方法\n",
        "\n",
        "def dump_images_and_labels(data,split):\n",
        "    data = data[split]\n",
        "    #获取数据集中指定的分割的数据\n",
        "    for i,example in enumerate(data):\n",
        "        image = example[\"image\"]\n",
        "        #获取图片数据\n",
        "        labels = example[\"litter\"][\"label\"]\n",
        "        #获取标签数据\n",
        "        bboxes = example[\"litter\"][\"bbox\"]\n",
        "        #获取边界框数据\n",
        "        targets = []\n",
        "        for label,box in zip(labels,bboxes):\n",
        "            targets.append(f\"{label} {box[0]} {box[1]} {box[2]} {box[3]}\")\n",
        "            #拼接目标信息字符串\n",
        "        with open(f\"datasets/labels/{split}/{i}.txt\",\"w\") as f:\n",
        "            for target in targets:\n",
        "                f.write(target + \"\\n\")\n",
        "                #将目标信息写入文件\n",
        "        image.save(f\"datasets/images/{split}/{i}.png\")\n",
        "        #保存图片文件\n",
        "\n",
        "if __name__ == \"__main__\":\n",
        "    # 判断是否直接运行脚本\n",
        "\n",
        "    #加载数据集\n",
        "    dataset = load_dataset(\"kili-technology/plastic_in_river\")\n",
        "    print(\"dataset =\", dataset)\n",
        "\n",
        "    os.makedirs(\"datasets/images/train\", exist_ok=True)\n",
        "    print(\"datasets/images/train 目录已创建\")\n",
        "\n",
        "    os.makedirs(\"datasets/images/validation\", exist_ok=True)\n",
        "    print(\"datasets/images/validation 目录已创建\")\n",
        "\n",
        "    os.makedirs(\"datasets/labels/train\", exist_ok=True)\n",
        "    print(\"datasets/labels/train 目录已创建\")\n",
        "\n",
        "    os.makedirs(\"datasets/labels/validation\", exist_ok=True)\n",
        "    print(\"datasets/labels/validation 目录已创建\")\n",
        "\n",
        "    dump_images_and_labels(dataset,\"train\")\n",
        "    print(\"train 数据已处理\")\n",
        "\n",
        "    dump_images_and_labels(dataset,\"validation\")\n",
        "    print(\"validation 数据已处理\")"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "p4jdc9YZkVgr",
        "outputId": "7f3cb67b-12eb-4f66-b993-0d1db413d12f"
      },
      "execution_count": 5,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "/usr/local/lib/python3.10/dist-packages/datasets/load.py:1454: FutureWarning: The repository for kili-technology/plastic_in_river contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/kili-technology/plastic_in_river\n",
            "You can avoid this message in future by passing the argument `trust_remote_code=True`.\n",
            "Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.\n",
            "  warnings.warn(\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "dataset = DatasetDict({\n",
            "    train: Dataset({\n",
            "        features: ['image', 'litter'],\n",
            "        num_rows: 3407\n",
            "    })\n",
            "    test: Dataset({\n",
            "        features: ['image', 'litter'],\n",
            "        num_rows: 427\n",
            "    })\n",
            "    validation: Dataset({\n",
            "        features: ['image', 'litter'],\n",
            "        num_rows: 425\n",
            "    })\n",
            "})\n",
            "datasets/images/train 目录已创建\n",
            "datasets/images/validation 目录已创建\n",
            "datasets/labels/train 目录已创建\n",
            "datasets/labels/validation 目录已创建\n",
            "train 数据已处理\n",
            "validation 数据已处理\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## 2、train.py\n",
        "### 使用预训练模型并给定训练参数,对模型进行微调"
      ],
      "metadata": {
        "id": "KNWeEsUKmnKx"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# 导入YOLO模型\n",
        "from ultralytics import YOLO\n",
        "\n",
        "# 加载模型\n",
        "model = YOLO('yolov8m.pt')  # 加载预训练模型（推荐用于训练）\n",
        "\n",
        "# 训练模型\n",
        "model.train(\n",
        "    data='plastic.yaml',  # 数据集配置文件\n",
        "    epochs=20,  # 训练轮数\n",
        "    imgsz=1280,  # 图像尺寸（train/val 的 imgsz 必须是整数，见下方运行警告）\n",
        "    batch=4,  # 批次大小\n",
        "    optimizer=\"Adam\",  # 优化器\n",
        "    lr0=1e-3,  # 初始学习率\n",
        ")\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "QxlbC8Awm0lb",
        "outputId": "fef0200c-8c49-4052-be28-39cd1e3b35a5"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Ultralytics YOLOv8.1.16 🚀 Python-3.10.12 torch-2.1.0+cu121 CPU (Intel Xeon 2.20GHz)\n",
            "\u001b[34m\u001b[1mengine/trainer: \u001b[0mtask=detect, mode=train, model=yolov8m.pt, data=plastic.yaml, epochs=20, time=None, patience=100, batch=4, imgsz=(1280, 720), save=True, save_period=-1, cache=False, device=None, workers=8, project=None, name=train4, exist_ok=False, pretrained=True, optimizer=Adam, verbose=True, seed=0, deterministic=True, single_cls=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, amp=True, fraction=1.0, profile=False, freeze=None, multi_scale=False, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, vid_stride=1, stream_buffer=False, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, embed=None, show=False, save_frames=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, show_boxes=True, line_width=None, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=None, workspace=4, nms=False, lr0=0.001, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0, auto_augment=randaugment, erasing=0.4, crop_fraction=1.0, cfg=None, tracker=botsort.yaml, save_dir=runs/detect/train4\n",
            "Overriding model.yaml nc=80 with nc=4\n",
            "\n",
            "                   from  n    params  module                                       arguments                     \n",
            "  0                  -1  1      1392  ultralytics.nn.modules.conv.Conv             [3, 48, 3, 2]                 \n",
            "  1                  -1  1     41664  ultralytics.nn.modules.conv.Conv             [48, 96, 3, 2]                \n",
            "  2                  -1  2    111360  ultralytics.nn.modules.block.C2f             [96, 96, 2, True]             \n",
            "  3                  -1  1    166272  ultralytics.nn.modules.conv.Conv             [96, 192, 3, 2]               \n",
            "  4                  -1  4    813312  ultralytics.nn.modules.block.C2f             [192, 192, 4, True]           \n",
            "  5                  -1  1    664320  ultralytics.nn.modules.conv.Conv             [192, 384, 3, 2]              \n",
            "  6                  -1  4   3248640  ultralytics.nn.modules.block.C2f             [384, 384, 4, True]           \n",
            "  7                  -1  1   1991808  ultralytics.nn.modules.conv.Conv             [384, 576, 3, 2]              \n",
            "  8                  -1  2   3985920  ultralytics.nn.modules.block.C2f             [576, 576, 2, True]           \n",
            "  9                  -1  1    831168  ultralytics.nn.modules.block.SPPF            [576, 576, 5]                 \n",
            " 10                  -1  1         0  torch.nn.modules.upsampling.Upsample         [None, 2, 'nearest']          \n",
            " 11             [-1, 6]  1         0  ultralytics.nn.modules.conv.Concat           [1]                           \n",
            " 12                  -1  2   1993728  ultralytics.nn.modules.block.C2f             [960, 384, 2]                 \n",
            " 13                  -1  1         0  torch.nn.modules.upsampling.Upsample         [None, 2, 'nearest']          \n",
            " 14             [-1, 4]  1         0  ultralytics.nn.modules.conv.Concat           [1]                           \n",
            " 15                  -1  2    517632  ultralytics.nn.modules.block.C2f             [576, 192, 2]                 \n",
            " 16                  -1  1    332160  ultralytics.nn.modules.conv.Conv             [192, 192, 3, 2]              \n",
            " 17            [-1, 12]  1         0  ultralytics.nn.modules.conv.Concat           [1]                           \n",
            " 18                  -1  2   1846272  ultralytics.nn.modules.block.C2f             [576, 384, 2]                 \n",
            " 19                  -1  1   1327872  ultralytics.nn.modules.conv.Conv             [384, 384, 3, 2]              \n",
            " 20             [-1, 9]  1         0  ultralytics.nn.modules.conv.Concat           [1]                           \n",
            " 21                  -1  2   4207104  ultralytics.nn.modules.block.C2f             [960, 576, 2]                 \n",
            " 22        [15, 18, 21]  1   3778012  ultralytics.nn.modules.head.Detect           [4, [192, 384, 576]]          \n",
            "Model summary: 295 layers, 25858636 parameters, 25858620 gradients, 79.1 GFLOPs\n",
            "\n",
            "Transferred 469/475 items from pretrained weights\n",
            "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/detect/train4', view at http://localhost:6006/\n",
            "Freezing layer 'model.22.dfl.conv.weight'\n",
            "WARNING ⚠️ updating to 'imgsz=1280'. 'train' and 'val' imgsz must be an integer, while 'predict' and 'export' imgsz may be a [h, w] list or an integer, i.e. 'yolo export imgsz=640,480' or 'yolo export imgsz=640'\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/labels/train.cache... 3407 images, 238 backgrounds, 0 corrupt: 100%|██████████| 3407/3407 [00:00<?, ?it/s]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/labels/validation.cache... 425 images, 42 backgrounds, 0 corrupt: 100%|██████████| 425/425 [00:00<?, ?it/s]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Plotting labels to runs/detect/train4/labels.jpg... \n",
            "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001, momentum=0.937) with parameter groups 77 weight(decay=0.0), 84 weight(decay=0.0005), 83 bias(decay=0.0)\n",
            "\u001b[34m\u001b[1mTensorBoard: \u001b[0mmodel graph visualization added ✅\n",
            "Image sizes 1280 train, 1280 val\n",
            "Using 0 dataloader workers\n",
            "Logging results to \u001b[1mruns/detect/train4\u001b[0m\n",
            "Starting training for 20 epochs...\n",
            "\n",
            "      Epoch    GPU_mem   box_loss   cls_loss   dfl_loss  Instances       Size\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "\r  0%|          | 0/852 [00:00<?, ?it/s]"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## 3、predict.py\n",
        "### 使用训练(微调)好的YOLO模型对新图像进行预测和可视化"
      ],
      "metadata": {
        "id": "J9qB9u97m4BL"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# 从ultralytics库中导入YOLO模型\n",
        "from ultralytics import YOLO\n",
        "# 从datasets库中导入load_dataset方法用于加载数据集\n",
        "from datasets import load_dataset\n",
        "# 从PIL图像处理库中导入Image类\n",
        "from PIL import Image\n",
        "\n",
        "# 使用load_dataset方法加载塑料垃圾检测数据集\n",
        "dataset = load_dataset(\"kili-technology/plastic_in_river\")\n",
        "# 从数据集中取出一张测试图片\n",
        "img = dataset[\"test\"][0][\"image\"]\n",
        "# 加载已训练好的YOLO检测模型\n",
        "model = YOLO(\"runs/detect/train/weights/best.pt\")\n",
        "# 使用模型对图片进行预测,结果保存在res变量中\n",
        "res = model.predict(img)[0]\n",
        "# 打印出预测框坐标\n",
        "print(res.boxes)\n",
        "# 可视化预测结果\n",
        "res = res.plot(line_width=1)\n",
        "# 将RGB通道顺序翻转成BGR顺序以用于opencv显示\n",
        "res = res[:, :, ::-1]\n",
        "# 将预测的可视化图像转换为PIL Image对象\n",
        "res = Image.fromarray(res)\n",
        "# 保存预测可视化图像\n",
        "res.save(\"output.png\")"
      ],
      "metadata": {
        "id": "wePy8AiUnGaD"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "!pwd"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "qsJ5-0ifCknT",
        "outputId": "2842491c-583b-4331-8f90-9c9fc4707700"
      },
      "execution_count": 9,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "/content\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [],
      "metadata": {
        "id": "2Ft4yC1xClyM"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}