{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "1e07826a-4e11-445c-a23d-67793e9852c3",
   "metadata": {},
   "source": [
    "## 0. 执行前说明\n",
    "- 需要修改的参数\n",
    "    - root：sar_aircraft项目原始数据的根目录，和current_path(也就是代码所在路径)是同级的\n",
    "    - yolo_datasets_path ：yolo对应数据集的路径\n",
    "- 原始数据目录结构\n",
    "    - 目录结构：\n",
    "            - current_path\n",
    "            - sar_aircraft\n",
    "                - JPEGImages\n",
    "                - Annotations\n",
    "      注：原始数据需要解压"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "73757ffa-3119-45aa-9fda-b03b13f66beb",
   "metadata": {},
   "source": [
    "## 1. 读取分类文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "0badd845-4f04-4fb2-b42a-c7130b3e262a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import random\n",
    "import shutil # 处理文件和目录\n",
    "from xml.etree import ElementTree\n",
    "from sklearn.model_selection import train_test_split\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "02528fec-c90f-41a9-b766-3470a1295cd8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(16463, 2)"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the pre-extracted annotation summary (one row per labelled object: file name + class id)\n",
    "data = pd.read_csv(filepath_or_buffer=\"output.csv\")\n",
    "data.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2a7dab09-57ed-4002-a01c-a5d856a71e13",
   "metadata": {},
   "source": [
    "## 2. 定义字典"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "8203e1a1-a111-493f-b40a-29592bb9e848",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Mapping from aircraft type name to integer class id (the YOLO class index)\n",
    "label2idx = {\"A330\": 0, \n",
    "             \"A320/321\": 1, \n",
    "             \"A220\": 2, \n",
    "             \"ARJ21\":3, \n",
    "             \"Boeing737\": 4, \n",
    "             \"Boeing787\":5, \n",
    "             \"other\":6}\n",
    "# Reverse mapping: class id -> label name\n",
    "idx2label = {idx: label for label, idx in label2idx.items()}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c49dd79b-263a-481a-83b2-b6266200bcc9",
   "metadata": {},
   "source": [
    "## 3. 划分A330类的数据集"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "34f54aba-e98a-44d2-a3e8-58fd22da6165",
   "metadata": {},
   "source": [
    "思路：\n",
    "- 找出所有包含A330的图像作为样本， 并随机找出相同数量的不包含A330的图像做负样本\n",
    "- 分为train（75%），val（20%）， test（5%）\n",
    "- test做最终验证，作为考题"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "37405f8b-d63a-491c-98f4-9c0ab56022f0",
   "metadata": {},
   "source": [
    "### 3.1 建立A330二分类的目录"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "938ebac3-dd8d-49a5-beec-3c774823fa41",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Directories of the original (VOC-style) source data; assumed to sit next to this notebook\n",
    "current_path = os.getcwd()\n",
    "root = os.path.join(current_path, \"sar_aircraft_source\")\n",
    "root_images_path = os.path.join(root, \"JPEGImages\")\n",
    "root_labels_path = os.path.join(root, \"Annotations\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "id": "02be5272-4d49-4cdf-8f3f-b62070a4a79a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "项目目录已创建\n"
     ]
    }
   ],
   "source": [
    "# Split ratios for train / val / test (fractions of the full data)\n",
    "train_ratio = 0.75\n",
    "val_ratio = 0.2\n",
    "test_ratio = 0.05\n",
    "\n",
    "# 1. Paths: the YOLO datasets root and the target directory for this split\n",
    "yolo_datasets_path = os.path.join(\"E:\\\\\", \"datasets\")\n",
    "sar_aircraft_path = os.path.join(yolo_datasets_path, \"sar_aircraft\")\n",
    "# Reuse the directory name left over from the earlier two-class experiment\n",
    "classification_path = os.path.join(sar_aircraft_path, \"two_classification_nons\")\n",
    "\n",
    "# 2. Create the dataset directory if missing.\n",
    "#    The previous try/except only printed inside `try` (the rmtree/makedirs were\n",
    "#    commented out), so FileNotFoundError could never be raised and the directory\n",
    "#    was never created on a fresh machine. exist_ok=True is idempotent on re-run.\n",
    "os.makedirs(classification_path, exist_ok=True)\n",
    "print(\"项目目录已创建\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "5e8001ae-22f4-43cb-b66e-765176681e01",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Directory layout required by YOLO: <class>/<images|labels>/<train|val|test>\n",
    "images_labels_list = [\"images\", \"labels\"]\n",
    "train_val_test_list = [\"train\", \"val\", \"test\"]\n",
    "\n",
    "# Create the directory tree for this class split.\n",
    "# os.makedirs creates missing parents, so only the leaf directories need to be\n",
    "# requested; exist_ok=True makes the cell safe to re-run (the previous version\n",
    "# crashed with FileExistsError on a second execution).\n",
    "cls_path = os.path.join(classification_path, \"class_012\")\n",
    "for path_0 in images_labels_list:\n",
    "    for path_1 in train_val_test_list:\n",
    "        os.makedirs(os.path.join(cls_path, path_0, path_1), exist_ok=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7785a734-0c2a-44d0-ab3b-dfb54c495ae8",
   "metadata": {},
   "source": [
    "### 3.2 找出A330的图像的正反例"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "450ce4a1-4568-42b4-a31e-60ccc5130b6a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>文件名</th>\n",
       "      <th>第一列内容</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0000001.txt</td>\n",
       "      <td>5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0000001.txt</td>\n",
       "      <td>4</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0000002.txt</td>\n",
       "      <td>5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0000002.txt</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0000002.txt</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16458</th>\n",
       "      <td>0004368.txt</td>\n",
       "      <td>4</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16459</th>\n",
       "      <td>0004368.txt</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16460</th>\n",
       "      <td>0004368.txt</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16461</th>\n",
       "      <td>0004368.txt</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16462</th>\n",
       "      <td>0004368.txt</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>16463 rows × 2 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "               文件名  第一列内容\n",
       "0      0000001.txt      5\n",
       "1      0000001.txt      4\n",
       "2      0000002.txt      5\n",
       "3      0000002.txt      2\n",
       "4      0000002.txt      2\n",
       "...            ...    ...\n",
       "16458  0004368.txt      4\n",
       "16459  0004368.txt      2\n",
       "16460  0004368.txt      2\n",
       "16461  0004368.txt      2\n",
       "16462  0004368.txt      2\n",
       "\n",
       "[16463 rows x 2 columns]"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Show the annotation table for a quick visual check\n",
    "data"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3b9db5ce-17ed-4006-9deb-3f89d72632f0",
   "metadata": {},
   "source": [
    "### 3.2.1 追加数据，把 1、2 类一起追加进去"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "3dfc4e7b-758c-4f5c-a0ac-f8067234e26c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "012类别的总数：5810\n",
      "包含类别012的图像个数：2556\n",
      "取出样本：2556\n"
     ]
    }
   ],
   "source": [
    "# Rows whose class id is 0, 1 or 2 (A330 / A320-321 / A220); one row per object\n",
    "mask_012 = data[\"第一列内容\"].isin([0, 1, 2])\n",
    "img_012_file = list(data.loc[mask_012, \"文件名\"])\n",
    "print(f\"012类别的总数：{len(img_012_file)}\")\n",
    "\n",
    "# Deduplicate to get the images that contain at least one such object\n",
    "pos_img_012_file = list(set(img_012_file))\n",
    "pos_img_012_num = len(pos_img_012_file)\n",
    "print(f\"包含类别012的图像个数：{pos_img_012_num}\")\n",
    "\n",
    "print(f\"取出样本：{len(pos_img_012_file)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "id": "8cf92738-6c00-4642-aa60-cc2e9c87d5c6",
   "metadata": {},
   "outputs": [],
   "source": [
    "def getlabelsfilelist(file_path):\n",
    "    \"\"\"\n",
    "        获取labels文件列表\n",
    "    \"\"\"\n",
    "    labels_list = []\n",
    "    for root, _, files in os.walk(file_path):\n",
    "            for file in files:\n",
    "                if file.endswith('.txt'):\n",
    "                    labels_list.append(file)\n",
    "    return labels_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "472e4ad1-9cc7-4b83-8642-dec3842b7929",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(217, 15, 58)"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# File lists of the already-split class_0 dataset (produced earlier);\n",
    "# the new 012 split below must stay consistent with it\n",
    "train_labels_0_all_path = os.path.join(classification_path, \"class_0\", \"labels\", \"train\")\n",
    "train_labels_0_all = getlabelsfilelist(train_labels_0_all_path)\n",
    "test_labels_0_all_path = os.path.join(classification_path, \"class_0\", \"labels\", \"test\")\n",
    "test_labels_0_all = getlabelsfilelist(test_labels_0_all_path)\n",
    "val_labels_0_all_path = os.path.join(classification_path, \"class_0\", \"labels\", \"val\")\n",
    "val_labels_0_all = getlabelsfilelist(val_labels_0_all_path)\n",
    "\n",
    "len(train_labels_0_all), len(test_labels_0_all), len(val_labels_0_all),"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "29972b1b-2794-4e51-ac96-cfc2ab572766",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Unique annotation file names across the whole dataset\n",
    "all_files = set(data[\"文件名\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "id": "66fa925a-748c-4d9b-b27f-451c7c0f0421",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "4368"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Total number of distinct images/annotation files\n",
    "len(all_files)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "id": "b26a0796-5762-44dc-922d-c97e81ec604c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Files not assigned to any class_0 split (sanity check only, not used later).\n",
    "# Use a set for O(1) membership instead of rebuilding and scanning a\n",
    "# concatenated list for every file (was O(n*m)).\n",
    "class0_assigned = set(train_labels_0_all) | set(test_labels_0_all) | set(val_labels_0_all)\n",
    "not_in_class0_files = [file for file in all_files if file not in class0_assigned]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "id": "dab10992-ee01-40e1-89d7-114739025601",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "4078"
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Count of files outside every class_0 split (sanity check)\n",
    "len(not_in_class0_files)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "id": "5be1ad44-6770-4537-ad09-3038135200b7",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "3788"
      ]
     },
     "execution_count": 52,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Manual sanity arithmetic: 4368 total images minus assumed class_0 split sizes.\n",
    "# NOTE(review): 434/30/116 are exactly double the 217/15/58 counts read above —\n",
    "# presumably images+labels counted together; TODO confirm these magic numbers\n",
    "4368 - 434 - 30 - 116"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "078a9ba9-3016-4994-b213-80418ccd72fc",
   "metadata": {},
   "source": [
    "### 3.3 切分数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "id": "360e0db3-6417-4be1-9f6b-71664a5a691b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1812"
      ]
     },
     "execution_count": 53,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Files containing none of classes 0/1/2 — the pool used to backfill slots\n",
    "# freed when conflicting samples are removed below.\n",
    "# Normalize to a list (defensive; it is already a list at this point)\n",
    "if isinstance(pos_img_012_file, set):\n",
    "    pos_img_012_file = list(pos_img_012_file)\n",
    "# Set lookup instead of an O(n) list scan per file (was O(n^2) overall)\n",
    "pos_012_set = set(pos_img_012_file)\n",
    "not_in_files = [file for file in all_files if file not in pos_012_set]\n",
    "len(not_in_files)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "id": "bb08ea90-f861-4c9c-aff0-38119f1da210",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Alias (copy) kept for the variable name used below — historical name from\n",
    "# the earlier class_0 notebook\n",
    "pos_img_0_file = list(pos_img_012_file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "id": "7d176c13-ac4d-4e7f-9ebf-c554a9e2740b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# First carve off the test set (5%), then split the remainder into train/val so\n",
    "# that val ends up at 20% of the original total: 0.2 / (0.2 + 0.75) of the rest.\n",
    "pos_train_files, pos_test_files = train_test_split(pos_img_0_file, test_size=test_ratio, shuffle=True, random_state=42)\n",
    "pos_train_files, pos_val_files = train_test_split(pos_train_files, test_size=(val_ratio / (val_ratio + train_ratio)), shuffle=True, random_state=42)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "id": "0163d066-6489-44d4-994a-7858988ebee4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1916 512 128\n",
      "1916 512 128\n"
     ]
    }
   ],
   "source": [
    "# Show the split sizes (train / val / test); the second, identical print that\n",
    "# was copy-pasted here has been removed\n",
    "print(len(pos_train_files), len(pos_val_files), len(pos_test_files))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "id": "a85a8d9f-e6d7-4a07-bb6b-7bf28c60fde3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "划分的训练集中包含 0 类的样本158\n",
      "划分的训练集中不包含 0 类的样本1758\n",
      "划分的测试集中包含 0 类的样本0\n",
      "划分的测试集中不包含 0 类的样本128\n",
      "划分的验证集中包含 0 类的样本7\n",
      "划分的验证集中不包含 0 类的样本505\n"
     ]
    }
   ],
   "source": [
    "# Overlap between the new 012 splits and the existing class_0 splits.\n",
    "# Membership tests now use sets (O(1)) instead of list scans, and the\n",
    "# copy-pasted comments that all said \"training set\" have been corrected.\n",
    "train_0_set = set(train_labels_0_all)\n",
    "test_0_set = set(test_labels_0_all)\n",
    "val_0_set = set(val_labels_0_all)\n",
    "\n",
    "# Training set: samples that are / are not already in the class_0 training set\n",
    "pos_train_0_files = [file for file in pos_train_files if file in train_0_set]\n",
    "print(f\"划分的训练集中包含 0 类的样本{len(pos_train_0_files)}\")\n",
    "pos_train_no0_files = [file for file in pos_train_files if file not in train_0_set]\n",
    "print(f\"划分的训练集中不包含 0 类的样本{len(pos_train_no0_files)}\")\n",
    "\n",
    "# Test set: samples that are / are not already in the class_0 test set\n",
    "pos_test_0_files = [file for file in pos_test_files if file in test_0_set]\n",
    "print(f\"划分的测试集中包含 0 类的样本{len(pos_test_0_files)}\")\n",
    "pos_test_no0_files = [file for file in pos_test_files if file not in test_0_set]\n",
    "print(f\"划分的测试集中不包含 0 类的样本{len(pos_test_no0_files)}\")\n",
    "\n",
    "# Validation set: samples that are / are not already in the class_0 val set\n",
    "pos_val_0_files = [file for file in pos_val_files if file in val_0_set]\n",
    "print(f\"划分的验证集中包含 0 类的样本{len(pos_val_0_files)}\")\n",
    "pos_val_no0_files = [file for file in pos_val_files if file not in val_0_set]\n",
    "print(f\"划分的验证集中不包含 0 类的样本{len(pos_val_no0_files)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "id": "44959b95-d2fc-44dc-bdb6-983ab6f00f6d",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "已从训练集中删除 59 个0类样本。\n",
      "Number of elements removed from pos_train_files: 59\n",
      "Number of elements removed from pos_val_files: 47\n",
      "Number of elements removed from pos_test_files: 12\n",
      "1916\n"
     ]
    }
   ],
   "source": [
    "# Reconcile the new 012 splits with the fixed class_0 training list: every\n",
    "# file in train_labels_0_all must end up in the 012 training set and must not\n",
    "# appear in the 012 val/test sets. Evicted items are replaced so sizes hold.\n",
    "# Counters for how many files were evicted from val / test\n",
    "pos_count_val = 0\n",
    "pos_count_test = 0\n",
    "pos_train_no0_files_add = []\n",
    "\n",
    "# The full class_0 training list is prepended wholesale at the end, so shrink\n",
    "# the non-class_0 training pool by the number of class_0 files not yet counted.\n",
    "train_num_diff = len(train_labels_0_all) - len(pos_train_0_files)\n",
    "# Remove train_num_diff samples from the (non-class_0) training pool\n",
    "if train_num_diff > 0:\n",
    "    # Randomly pick samples to drop from the pool.\n",
    "    # NOTE(review): the cap min(train_num_diff, len(pos_train_0_files)) uses the\n",
    "    # size of the *0-class* list — presumably len(pos_train_no0_files) was\n",
    "    # intended; it happens not to matter for the current counts. TODO confirm.\n",
    "    to_remove = random.sample(pos_train_no0_files, min(train_num_diff, len(pos_train_0_files)))\n",
    "    \n",
    "    # Drop the chosen samples from the training pool\n",
    "    for file in to_remove:\n",
    "        pos_train_no0_files.remove(file)\n",
    "    \n",
    "    print(f\"已从训练集中删除 {len(to_remove)} 个0类样本。\")\n",
    "else:\n",
    "    print(\"不需要删除0类样本，因为当前0类样本数量已经足够少。\")\n",
    "\n",
    "for labels_item in train_labels_0_all:\n",
    "    # If a class_0 training file leaked into the 012 validation set, evict it\n",
    "    # and backfill: move one file from the training pool into val, then top the\n",
    "    # pool back up from the unassigned files.\n",
    "    if labels_item in pos_val_files:\n",
    "        pos_val_files.remove(labels_item)\n",
    "        pos_count_val += 1\n",
    "        if pos_train_no0_files:\n",
    "            new_item = pos_train_no0_files.pop(0)  # take one file from the training pool\n",
    "            pos_val_files.append(new_item)  # move it into the validation set\n",
    "            # Backfill the training pool from the unassigned files\n",
    "            new_item1 = not_in_files.pop(0)\n",
    "            pos_train_no0_files_add.append(new_item1)\n",
    "    # Same eviction/backfill when the file leaked into the 012 test set\n",
    "    if labels_item in pos_test_files:\n",
    "        pos_test_files.remove(labels_item)\n",
    "        pos_count_test += 1\n",
    "        if pos_train_no0_files:\n",
    "            new_item = pos_train_no0_files.pop(0)  # take one file from the training pool\n",
    "            pos_test_files.append(new_item)  # move it into the test set\n",
    "            # Backfill the training pool from the unassigned files\n",
    "            new_item1 = not_in_files.pop(0)\n",
    "            pos_train_no0_files_add.append(new_item1)\n",
    "\n",
    "# Report how many elements were moved/removed\n",
    "print(\"Number of elements removed from pos_train_files:\", train_num_diff)\n",
    "print(\"Number of elements removed from pos_val_files:\", pos_count_val)\n",
    "print(\"Number of elements removed from pos_test_files:\", pos_count_test)\n",
    "\n",
    "# Final 012 training set: all class_0 training files + remaining pool + backfill\n",
    "pos_train_files = train_labels_0_all + pos_train_no0_files + pos_train_no0_files_add\n",
    "print(len(pos_train_files))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "id": "2675b9d9-76e8-4927-8965-4287abcf1559",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1753"
      ]
     },
     "execution_count": 59,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Unassigned files remaining after the backfill above\n",
    "len(not_in_files)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "47537019-5e65-4661-a168-3bbe949a6de5",
   "metadata": {},
   "source": [
    "### 3.4 转换标签文件格式，从xml转换到txt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "id": "29d8699f-31c5-4ea5-b6c4-5b4cc5d3439d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def xml_transfer_yolo_bak(source_file_name, object_folder, file):\n",
    "    \"\"\"\n",
    "    Convert one VOC-style XML annotation into a YOLO-format txt label file.\n",
    "\n",
    "    NOTE(review): earlier variant kept as a backup and never called; the active\n",
    "    version is xml_transfer_yolo below, which filters by class id instead of\n",
    "    class name.\n",
    "\n",
    "    Parameters:\n",
    "        source_file_name: path of the source .xml annotation file\n",
    "        object_folder: directory where the output txt file is written\n",
    "        file: output file name (already carries the .txt extension)\n",
    "    \"\"\"\n",
    "    tree = ElementTree.parse(source=source_file_name)\n",
    "    root = tree.getroot()\n",
    "\n",
    "    # Image dimensions, needed to normalize the box coordinates\n",
    "    img_width = int(root.find(path=\"size\").find(path=\"width\").text)\n",
    "    img_height = int(root.find(path=\"size\").find(path=\"height\").text)\n",
    "\n",
    "    # Convert the annotations; mode \"w\" means images without matching objects\n",
    "    # still get an (empty) label file — these act as negative samples.\n",
    "    # finaly_file_name = os.path.splitext(file)[0] + \".txt\"\n",
    "    with open(file=os.path.join(object_folder, file), mode=\"w\", encoding=\"utf8\") as f:\n",
    "        # Iterate over every annotated object, keeping only the target types\n",
    "        # print(finaly_file_name)\n",
    "        for obj in root.findall(path=\"object\"):\n",
    "            name = obj.find(path=\"name\").text\n",
    "            if name in [\"A330\", \"A320/321\", \"A220\"]:\n",
    "                cls_id = label2idx.get(name)\n",
    "                xmin = int(obj.find(path=\"bndbox\").find(path=\"xmin\").text)\n",
    "                ymin = int(obj.find(path=\"bndbox\").find(path=\"ymin\").text)\n",
    "                xmax = int(obj.find(path=\"bndbox\").find(path=\"xmax\").text)\n",
    "                ymax = int(obj.find(path=\"bndbox\").find(path=\"ymax\").text)\n",
    "    \n",
    "                \"\"\"\n",
    "                    开始标注转化\n",
    "                    - yolo需要的是相对坐标，也就是相对于原始图像的比例，这样当图像resize到不同\n",
    "                    尺寸时，相对坐标所标注的框位置不变，可以更好的应对不同尺寸的图像\n",
    "                \"\"\"\n",
    "                # YOLO uses coordinates relative to the image size, so the boxes\n",
    "                # stay valid when the image is resized.\n",
    "                # 1. Box center, width and height, normalized to [0, 1]\n",
    "                # print(type(xmin), type(xmax), type(img_width))\n",
    "                x_center = round(number=(xmin + xmax) / 2 / img_width, ndigits=6)\n",
    "                y_center = round(number=(ymin + ymax) / 2 / img_height, ndigits=6)\n",
    "                box_width = round(number=(xmax - xmin) / img_width, ndigits=6)\n",
    "                box_height = round(number=(ymax - ymin) / img_height, ndigits=6)\n",
    "                print(cls_id, x_center, y_center, box_width, box_height, sep=\" \", end=\"\\n\", file=f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "e40c5f3d-e54c-4acf-a746-b17ca591c50e",
   "metadata": {},
   "outputs": [],
   "source": [
    "def xml_transfer_yolo(source_file_name, object_folder, file):\n",
    "    \"\"\"\n",
    "        定义转换格式的函数\n",
    "        把xml（voc）格式转化为txt（yolo）格式\n",
    "    \"\"\"\n",
    "    tree = ElementTree.parse(source=source_file_name)\n",
    "    root = tree.getroot()\n",
    "\n",
    "    # 图像的高度和宽度\n",
    "    img_width = int(root.find(path=\"size\").find(path=\"width\").text)\n",
    "    img_height = int(root.find(path=\"size\").find(path=\"height\").text)\n",
    "\n",
    "    # 转化标注信息\n",
    "    # finaly_file_name = os.path.splitext(file)[0] + \".txt\"\n",
    "    with open(file=os.path.join(object_folder, file), mode=\"w\", encoding=\"utf8\") as f:\n",
    "        # 遍历每个目标\n",
    "        # print(finaly_file_name)\n",
    "        for obj in root.findall(path=\"object\"):\n",
    "            name = obj.find(path=\"name\").text\n",
    "            cls_id = label2idx.get(name)\n",
    "            xmin = int(obj.find(path=\"bndbox\").find(path=\"xmin\").text)\n",
    "            ymin = int(obj.find(path=\"bndbox\").find(path=\"ymin\").text)\n",
    "            xmax = int(obj.find(path=\"bndbox\").find(path=\"xmax\").text)\n",
    "            ymax = int(obj.find(path=\"bndbox\").find(path=\"ymax\").text)\n",
    "\n",
    "            \"\"\"\n",
    "                开始标注转化\n",
    "                - yolo需要的是相对坐标，也就是相对于原始图像的比例，这样当图像resize到不同\n",
    "                尺寸时，相对坐标所标注的框位置不变，可以更好的应对不同尺寸的图像\n",
    "            \"\"\"\n",
    "            # 1. 中心点坐标\n",
    "            # print(type(xmin), type(xmax), type(img_width))\n",
    "            x_center = round(number=(xmin + xmax) / 2 / img_width, ndigits=6)\n",
    "            y_center = round(number=(ymin + ymax) / 2 / img_height, ndigits=6)\n",
    "            box_width = round(number=(xmax - xmin) / img_width, ndigits=6)\n",
    "            box_height = round(number=(ymax - ymin) / img_height, ndigits=6)\n",
    "            if cls_id in [0, 1, 2]:\n",
    "                print(cls_id, x_center, y_center, box_width, box_height, sep=\" \", end=\"\\n\", file=f)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0fae049e-2f96-47f1-b3af-817669fd45b8",
   "metadata": {},
   "source": [
    "### 3.5 复制img和label到yolo数据集目录下"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "id": "6960fb4b-fa43-4464-b601-a364645b10f8",
   "metadata": {},
   "outputs": [],
   "source": [
    "def copy_files(files, images_path, labels_path):\n",
    "    \"\"\"\n",
    "    Copy images and converted labels into the YOLO dataset directory.\n",
    "\n",
    "    Parameters:\n",
    "        files: label file names to process ('<stem>.txt', e.g. pos_train_files)\n",
    "        images_path: destination directory for the .jpg images\n",
    "        labels_path: destination directory for the converted .txt labels\n",
    "\n",
    "    Relies on the module-level root_images_path / root_labels_path and on\n",
    "    xml_transfer_yolo defined above; assumes every image is a .jpg with the\n",
    "    same stem as its label file.\n",
    "    \"\"\"\n",
    "    for file in files:\n",
    "        # Copy the image with the same stem as the label file\n",
    "        shutil.copy(os.path.join(root_images_path, os.path.splitext(file)[0] + '.jpg'), images_path)\n",
    "        # Convert the matching VOC xml annotation into a YOLO txt label\n",
    "        label_file = os.path.splitext(file)[0] + '.xml'\n",
    "        xml_transfer_yolo(source_file_name=os.path.join(root_labels_path, label_file), object_folder=labels_path, file=file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "id": "af3ef7d5-5993-4a7f-bae8-40b747e5d668",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Training set: copy images and convert labels\n",
    "copy_files(files=pos_train_files, images_path=os.path.join(classification_path, \"class_012\", \"images\", \"train\"), labels_path=os.path.join(classification_path, \"class_012\", \"labels\", \"train\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "id": "1551e97d-43e2-4bcb-9300-df82b173c6b7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 2. Validation set: copy images and convert labels\n",
    "copy_files(files=pos_val_files, images_path=os.path.join(classification_path, \"class_012\", \"images\", \"val\"), labels_path=os.path.join(classification_path, \"class_012\", \"labels\", \"val\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "id": "18cb1675-7c69-4e8e-a82c-c21b262da069",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 3. Test set: copy images and convert labels (original comment wrongly said\n",
    "#    training set — copy-paste slip)\n",
    "copy_files(files=pos_test_files, images_path=os.path.join(classification_path, \"class_012\", \"images\", \"test\"), labels_path=os.path.join(classification_path, \"class_012\", \"labels\", \"test\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "68dc8a39-3530-4d5a-b0a5-61ee02f9340d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "id": "3cf3deeb-7c83-4c0c-b4f3-035c3e18c1c0",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(1916, 128, 512)"
      ]
     },
     "execution_count": 66,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Re-read the generated class_012 label directories to verify the split sizes\n",
    "train_labels_012_all_path = os.path.join(classification_path, \"class_012\", \"labels\", \"train\")\n",
    "train_labels_012_all = getlabelsfilelist(train_labels_012_all_path)\n",
    "test_labels_012_all_path = os.path.join(classification_path, \"class_012\", \"labels\", \"test\")\n",
    "test_labels_012_all = getlabelsfilelist(test_labels_012_all_path)\n",
    "val_labels_012_all_path = os.path.join(classification_path, \"class_012\", \"labels\", \"val\")\n",
    "val_labels_012_all = getlabelsfilelist(val_labels_012_all_path)\n",
    "\n",
    "len(train_labels_012_all), len(test_labels_012_all), len(val_labels_012_all),"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "id": "f6a8d06a-9b9a-4ced-b3a5-8acdc0b09235",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1916\n",
      "128\n",
      "512\n"
     ]
    }
   ],
   "source": [
    "# Final split sizes for reference. The stray trailing commas after the first\n",
    "# two prints built throwaway (None,) tuples and have been removed.\n",
    "print(len(pos_train_files))\n",
    "print(len(pos_test_files))\n",
    "print(len(pos_val_files))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e05753f0-2cf3-4f71-9ae6-37dd1941ed9c",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
