{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Number of xml files: 128\n",
      "D:/fish/standard_fish128/json/data.json\n"
     ]
    }
   ],
   "source": [
    "# xml convert json\n",
    "import sys\n",
    "import os\n",
    "import json\n",
    "import xml.etree.ElementTree as ET\n",
    "import glob\n",
    "START_BOUNDING_BOX_ID = 1\n",
    "PRE_DEFINE_CATEGORIES = None\n",
    "# If necessary, pre-define category and its id\n",
    "#  PRE_DEFINE_CATEGORIES = {\"aeroplane\": 1, \"bicycle\": 2, \"bird\": 3, \"boat\": 4,\n",
    "#  \"bottle\":5, \"bus\": 6, \"car\": 7, \"cat\": 8, \"chair\": 9,\n",
    "#  \"cow\": 10, \"diningtable\": 11, \"dog\": 12, \"horse\": 13,\n",
    "#  \"motorbike\": 14, \"person\": 15, \"pottedplant\": 16,\n",
    "#  \"sheep\": 17, \"sofa\": 18, \"train\": 19, \"tvmonitor\": 20}\n",
    "def get(root, name):\n",
    "    vars = root.findall(name)\n",
    "    return vars\n",
    "def get_and_check(root, name, length):\n",
    "    vars = root.findall(name)\n",
    "    if len(vars) == 0:\n",
    "        raise ValueError(\"Can not find %s in %s.\" % (name, root.tag))\n",
    "    if length > 0 and len(vars) != length:\n",
    "        raise ValueError(\n",
    "            \"The size of %s is supposed to be %d, but is %d.\"\n",
    "            % (name, length, len(vars)))\n",
    "    if length == 1:\n",
    "        vars = vars[0]\n",
    "    return vars\n",
    "def get_filename_as_int(filename):\n",
    "    try:\n",
    "        filename = filename.replace(\"\\\\\", \"/\")\n",
    "        filename = os.path.splitext(os.path.basename(filename))[0]\n",
    "        return int(filename)\n",
    "    except:\n",
    "        raise ValueError(\"Filename %s is supposed to be an integer.\" % (filename))\n",
    "def get_categories(xml_files):\n",
    "    \"\"\"Generate category name to id mapping from a list of xml files.\n",
    "    Arguments:\n",
    "        xml_files {list} -- A list of xml file paths.\n",
    "    Returns:\n",
    "        dict -- category name to id mapping.\n",
    "    \"\"\"\n",
    "    classes_names = []\n",
    "    for xml_file in xml_files:\n",
    "        tree = ET.parse(xml_file)\n",
    "        root = tree.getroot()\n",
    "        for member in root.findall(\"object\"):\n",
    "            classes_names.append(member[0].text)\n",
    "    classes_names = list(set(classes_names))\n",
    "    classes_names.sort()\n",
    "    return {name: i for i, name in enumerate(classes_names)}\n",
    "def convert(xml_files, json_file):\n",
    "    json_dict = {\"images\": [], \"type\": \"instances\", \"annotations\": [], \"categories\": []}\n",
    "    if PRE_DEFINE_CATEGORIES is not None:\n",
    "        categories = PRE_DEFINE_CATEGORIES\n",
    "    else:\n",
    "        categories = get_categories(xml_files)\n",
    "    bnd_id = START_BOUNDING_BOX_ID\n",
    "    for xml_file in xml_files:\n",
    "        tree = ET.parse(xml_file)\n",
    "        root = tree.getroot()\n",
    "        path = get(root, \"path\")\n",
    "        if len(path) == 1:\n",
    "            filename = os.path.basename(path[0].text)\n",
    "        elif len(path) == 0:\n",
    "            filename = get_and_check(root, \"filename\", 1).text\n",
    "        else:\n",
    "            raise ValueError(\"%d paths found in %s\" % (len(path), xml_file))\n",
    "        ## The filename must be a number\n",
    "        image_id = get_filename_as_int(filename)\n",
    "        size = get_and_check(root, \"size\", 1)\n",
    "        width = int(get_and_check(size, \"width\", 1).text)\n",
    "        height = int(get_and_check(size, \"height\", 1).text)\n",
    "        image = {\n",
    "            \"file_name\": filename,\n",
    "            \"height\": height,\n",
    "            \"width\": width,\n",
    "            \"id\": image_id,\n",
    "        }\n",
    "        json_dict[\"images\"].append(image)\n",
    "        ## Currently we do not support segmentation.\n",
    "        #  segmented = get_and_check(root, 'segmented', 1).text\n",
    "        #  assert segmented == '0'\n",
    "        for obj in get(root, \"object\"):\n",
    "            category = get_and_check(obj, \"name\", 1).text\n",
    "            if category not in categories:\n",
    "                new_id = len(categories)\n",
    "                categories[category] = new_id\n",
    "            category_id = categories[category]\n",
    "            bndbox = get_and_check(obj, \"bndbox\", 1)\n",
    "            xmin = int(get_and_check(bndbox, \"xmin\", 1).text) - 1\n",
    "            ymin = int(get_and_check(bndbox, \"ymin\", 1).text) - 1\n",
    "            xmax = int(get_and_check(bndbox, \"xmax\", 1).text)\n",
    "            ymax = int(get_and_check(bndbox, \"ymax\", 1).text)\n",
    "            assert xmax > xmin\n",
    "            assert ymax > ymin\n",
    "            o_width = abs(xmax - xmin)\n",
    "            o_height = abs(ymax - ymin)\n",
    "            ann = {\n",
    "                \"area\": o_width * o_height,\n",
    "                \"iscrowd\": 0,\n",
    "                \"image_id\": image_id,\n",
    "                \"bbox\": [xmin, ymin, o_width, o_height],\n",
    "                \"category_id\": category_id,\n",
    "                \"id\": bnd_id,\n",
    "                \"ignore\": 0,\n",
    "                \"segmentation\": [],\n",
    "            }\n",
    "            json_dict[\"annotations\"].append(ann)\n",
    "            bnd_id = bnd_id + 1\n",
    "    for cate, cid in categories.items():\n",
    "        cat = {\"supercategory\": \"none\", \"id\": cid, \"name\": cate}\n",
    "        json_dict[\"categories\"].append(cat)\n",
    "    os.makedirs(os.path.dirname(json_file), exist_ok=True)\n",
    "    print(json_file)\n",
    "    json_fp = open(json_file, \"w\")\n",
    "    json_str = json.dumps(json_dict, indent = 4)\n",
    "    json_fp.write(json_str)\n",
    "    json_fp.close()\n",
    "    \n",
    "xml_path = r\"D:\\fish\\standard_fish128\\xml\"\n",
    "json_path = r'D:/fish/standard_fish128/json/data.json'\n",
    "xml_files = glob.glob(os.path.join(xml_path, \"*.xml\"))\n",
    "# If you want to do train/test split, you can pass a subset of xml files to convert function.\n",
    "print(\"Number of xml files: {}\".format(len(xml_files)))\n",
    "convert(xml_files, json_path)\n",
    "# print(\"Success: {}\".format(args.json_file))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Annotations D:\\fish\\standard_fish128\\json\\data.json: 100%|█████████████████████████| 963/963 [00:00<00:00, 4147.08it/s]\n"
     ]
    }
   ],
   "source": [
    "#### json convert txt\n",
    "import json\n",
    "import cv2\n",
    "import pandas as pd\n",
    "from PIL import Image\n",
    "import glob\n",
    "import os\n",
    "import shutil\n",
    "from pathlib import Path\n",
    "import numpy as np\n",
    "from PIL import ExifTags\n",
    "from tqdm import tqdm\n",
    "x = [0]\n",
    "def convert_coco_json(json_dir='', txt_dir='', x=[0], use_segments=False):\n",
    "    jsons = glob.glob(json_dir + '*.json')\n",
    "    coco80 = x\n",
    "    # Import json\n",
    "    for json_file in sorted(jsons):\n",
    "        fn = txt_dir\n",
    "        with open(json_file) as f:\n",
    "            data = json.load(f)\n",
    "        # Create image dict\n",
    "        images = {'%g' % x['id']: x for x in data['images']}\n",
    "        # Write labels file\n",
    "        for x in tqdm(data['annotations'], desc='Annotations %s' % json_file):\n",
    "            if x['iscrowd']:\n",
    "                continue\n",
    "            img = images['%g' % x['image_id']]\n",
    "            h, w, f = img['height'], img['width'], img['file_name']\n",
    "            # The COCO box format is [top left x, top left y, width, height]\n",
    "            box = np.array(x['bbox'], dtype=np.float64)\n",
    "            box[:2] += box[2:] / 2  # xy top-left corner to center\n",
    "            box[[0, 2]] /= w  # normalize x\n",
    "            box[[1, 3]] /= h  # normalize y\n",
    "            # Segments\n",
    "            segments = [j for i in x['segmentation'] for j in i]  # all segments concatenated\n",
    "            s = (np.array(segments).reshape(-1, 2) / np.array([w, h])).reshape(-1).tolist()\n",
    "            # Write\n",
    "            if box[2] > 0 and box[3] > 0:  # if w > 0 and h > 0\n",
    "#                 print(img)\n",
    "                line = coco80[x['category_id']], *(s if use_segments else box)  # cls, box or segments\n",
    "#                 print(fn+f)\n",
    "                with open(fn+f[:-4]+'.txt', 'a') as file:\n",
    "                    file.write(('%g ' * len(line)).rstrip() % line + '\\n')\n",
    "convert_coco_json(r\"D:\\fish\\standard_fish128\\json/\", r'D:\\fish\\standard_fish128\\txt/', x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "#### xml convert txt\n",
    "import glob\n",
    "import os\n",
    "xml_path = r'D:\\fish\\standard_fish128\\xml/'\n",
    "txt_root = r'D:\\fish\\standard_fish128\\txt/'\n",
    "classes = ['fish']\n",
    "#定义从xml获取信息的函数\n",
    "def _read_anno(filename):\n",
    "    import xml.etree.ElementTree as ET\n",
    "    tree = ET.parse(filename)\n",
    "    #获取宽w和高h\n",
    "    a = tree.find('size')\n",
    "    w,h = [int(a.find('width').text),\n",
    "           int(a.find('height').text)]\n",
    "    objects = []\n",
    "    #这里是针对错误xml文件，图片的w和h都为0，这样的xml文件可以直接忽视，返回空列表\n",
    "    if w == 0:\n",
    "        return []\n",
    "    for obj in tree.findall('object'):\n",
    "    \t#获取name\n",
    "        name = obj.find('name').text\n",
    "        #修改label，这里是不同数据集大融合的关键\n",
    "        label = classes.index(name)\n",
    "\t\t#读取检测框的左上、右下角点的坐标\n",
    "        bbox = obj.find('bndbox')\n",
    "        x1, y1, x2, y2 = [int(bbox.find('xmin').text),\n",
    "                          int(bbox.find('ymin').text),\n",
    "                          int(bbox.find('xmax').text),\n",
    "                          int(bbox.find('ymax').text)]\n",
    "\t\t#这里也很关键，yolov5需要中心点以及宽和高的标注信息，并且进行归一化，下边label后边的四个值即是归一化后保留4位有效数字的x，y，w，h\n",
    "        obj_struct = [label,round((x1+x2)/(2.0*w),4), round((y1+y2)/(2.0*h),4), \n",
    "                      round((x2-x1)/(w),4),round((y2-y1)/(h),4)]\n",
    "        objects.append(obj_struct)\n",
    "    return objects\n",
    "#接下来是写入txt文件中 定义一个空的字符串\n",
    "t = ''\n",
    "#获取所有的xml文件路径\n",
    "allfilepath = []\n",
    "for file in os.listdir(xml_path):\n",
    "    if file.endswith('.xml'):\n",
    "        file = os.path.join(xml_path,file)\n",
    "        allfilepath.append(file)\n",
    "    else:\n",
    "        pass\n",
    " #生成需要的对应xml文件名的txt\n",
    "for file in allfilepath:\n",
    "    txt_path = txt_root+file.split('/')[1][:-4] + '.txt'\n",
    "    result = _read_anno(file)\n",
    "    #跳过空列表\n",
    "    if len(result)==0:\n",
    "        continue\n",
    "    #写入信息，注意每次循环结束都把t重新定义，result是一个二维列表（行数为目标个数，列对应label和位置信息），为了避免读取出错（还有一个原因是我菜），我们一个一个的写入。\n",
    "    with open(txt_path,'w') as f:\n",
    "        for line in result:\n",
    "            for a in line:\n",
    "                t = t+str(a)+' '\n",
    "            f.writelines(t)\n",
    "            f.writelines('\\n')\n",
    "            t =''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "### img 2 dota_img\n",
    "import os\n",
    "from glob import glob\n",
    "rename_img_root = r'D:\\fish\\rotated_fish128\\img_dota'\n",
    "for img in glob(rename_img_root+'/*.jpg'):\n",
    "    os.rename(img,img.replace('.jpg', '__1__0___0.jpg'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "02500.xml\n",
      "02501.xml\n",
      "02502.xml\n",
      "02503.xml\n",
      "02504.xml\n",
      "02505.xml\n",
      "02506.xml\n",
      "02507.xml\n",
      "02508.xml\n",
      "02509.xml\n",
      "02510.xml\n",
      "02511.xml\n",
      "02512.xml\n",
      "02513.xml\n",
      "02514.xml\n",
      "02515.xml\n",
      "02516.xml\n",
      "02517.xml\n",
      "02518.xml\n",
      "02519.xml\n",
      "02520.xml\n",
      "02521.xml\n",
      "02522.xml\n",
      "02523.xml\n",
      "02524.xml\n",
      "02525.xml\n",
      "02526.xml\n",
      "02527.xml\n",
      "02528.xml\n",
      "02529.xml\n",
      "02530.xml\n",
      "02531.xml\n",
      "02532.xml\n",
      "02533.xml\n",
      "02534.xml\n",
      "02535.xml\n",
      "02536.xml\n",
      "02537.xml\n",
      "02538.xml\n",
      "02539.xml\n",
      "02540.xml\n",
      "02541.xml\n",
      "02542.xml\n",
      "02543.xml\n",
      "02544.xml\n",
      "02545.xml\n",
      "02546.xml\n",
      "02547.xml\n",
      "02548.xml\n",
      "02549.xml\n",
      "02550.xml\n",
      "02551.xml\n",
      "02552.xml\n",
      "02553.xml\n",
      "02554.xml\n",
      "02555.xml\n",
      "02556.xml\n",
      "02557.xml\n",
      "02558.xml\n",
      "02559.xml\n",
      "02560.xml\n",
      "02561.xml\n",
      "02562.xml\n",
      "02563.xml\n",
      "02564.xml\n",
      "02565.xml\n",
      "02566.xml\n",
      "02567.xml\n",
      "02568.xml\n",
      "02569.xml\n",
      "02570.xml\n",
      "02571.xml\n",
      "02572.xml\n",
      "02573.xml\n",
      "02574.xml\n",
      "02575.xml\n",
      "02576.xml\n",
      "02577.xml\n",
      "02578.xml\n",
      "02579.xml\n",
      "02580.xml\n",
      "02581.xml\n",
      "02582.xml\n",
      "02583.xml\n",
      "02584.xml\n",
      "02585.xml\n",
      "02586.xml\n",
      "02587.xml\n",
      "02588.xml\n",
      "02589.xml\n",
      "02590.xml\n",
      "02591.xml\n",
      "02592.xml\n",
      "02593.xml\n",
      "02594.xml\n",
      "02595.xml\n",
      "02596.xml\n",
      "02597.xml\n",
      "02598.xml\n",
      "02599.xml\n",
      "02600.xml\n",
      "02601.xml\n",
      "02602.xml\n",
      "02603.xml\n",
      "02604.xml\n",
      "02605.xml\n",
      "02606.xml\n",
      "02607.xml\n",
      "02608.xml\n",
      "02609.xml\n",
      "02610.xml\n",
      "02611.xml\n",
      "02612.xml\n",
      "02613.xml\n",
      "02614.xml\n",
      "02615.xml\n",
      "02616.xml\n",
      "02617.xml\n",
      "02618.xml\n",
      "02619.xml\n",
      "02620.xml\n",
      "02621.xml\n",
      "02622.xml\n",
      "02623.xml\n",
      "02624.xml\n",
      "02625.xml\n",
      "02626.xml\n",
      "02627.xml\n"
     ]
    }
   ],
   "source": [
    "###rxml 2 yolo_txt\n",
    "import math\n",
    "classes = ['fish']\n",
    "def convert_r(r):\n",
    "    hudu = 180/math.pi\n",
    "    if (r>=0) and (r<math.pi):\n",
    "        ri = r*hudu\n",
    "    else:\n",
    "        ri = (r-math.pi)*hudu\n",
    "    return int(ri)\n",
    "\n",
    "import glob\n",
    "import os\n",
    "import xml.etree.ElementTree as ET\n",
    "xml_root = r'D:\\fish\\rotated_fish128\\xml/'\n",
    "txt_root = r'D:\\fish\\rotated_fish128\\yolo_txt/'\n",
    "xml_names = os.listdir(xml_root)\n",
    "for xml_name in xml_names:\n",
    "    print(xml_name)\n",
    "    tree = ET.parse(xml_root+xml_name)\n",
    "    #获取宽w和高h\n",
    "    a = tree.find('size')\n",
    "    w,h = [int(a.find('width').text),\n",
    "           int(a.find('height').text)]  \n",
    "    boxes = []\n",
    "    for obj in tree.findall('object'):\n",
    "        name = obj.find('name').text\n",
    "        label = classes.index(name)\n",
    "        bbox = obj.find('robndbox')\n",
    "        box = [float(bbox.find('cx').text), float(bbox.find('cy').text), float(bbox.find('w').text), \n",
    "               float(bbox.find('h').text), float(bbox.find('angle').text)]\n",
    "        box[0], box[2] = box[0]/w, box[2]/w\n",
    "        box[1], box[3] = box[1]/h, box[3]/h\n",
    "        box[-1] = convert_r(box[-1])\n",
    "        box = [float(label)] + box\n",
    "        boxes.append(box)\n",
    "    with open(txt_root+xml_name[:-4]+'__1__0___0.txt', 'w') as f:\n",
    "        for box in boxes:\n",
    "            for e in box:\n",
    "                f.write(str(e))\n",
    "                f.write(' ')\n",
    "            f.write('\\n')\n",
    "        f.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "### rxml 2 dotatxt\n",
    "import os\n",
    "import xml.etree.ElementTree as ET\n",
    "import math\n",
    "classes = ['fish']\n",
    "touwenjian = 'imagesource:GoogleEarth\\ngsd:0.115726939386\\n'\n",
    "def edit_xml(xml_file):\n",
    "    all_ = []\n",
    "    tree = ET.parse(xml_file)\n",
    "    objs = tree.findall('object')\n",
    "    for ix, obj in enumerate(objs):\n",
    "        obj_type = obj.find('type')\n",
    "#         print(xml_file)\n",
    "        obj_bnd = obj.find('robndbox')\n",
    "        obj_cx = obj_bnd.find('cx')\n",
    "        obj_cy = obj_bnd.find('cy')\n",
    "        obj_w = obj_bnd.find('w')\n",
    "        obj_h = obj_bnd.find('h')\n",
    "        obj_angle = obj_bnd.find('angle')\n",
    "        name = obj.find('name').text\n",
    "        label = classes.index(name)\n",
    "        cx = float(obj_cx.text)\n",
    "        cy = float(obj_cy.text)\n",
    "        w = float(obj_w.text)\n",
    "        h = float(obj_h.text)\n",
    "        angle = float(obj_angle.text)\n",
    "\n",
    "        x0, y0 = rotatePoint(cx, cy, cx - w / 2, cy - h / 2, -angle)\n",
    "        x1, y1 = rotatePoint(cx, cy, cx + w / 2, cy - h / 2, -angle)\n",
    "        x2, y2 = rotatePoint(cx, cy, cx + w / 2, cy + h / 2, -angle)\n",
    "        x3, y3 = rotatePoint(cx, cy, cx - w / 2, cy + h / 2, -angle)\n",
    "        all_.append(x0+' '+y0+' '+x1+' '+y1+' '+x2+' '+y2+' '+x3+' '+y3+' '+name+' '+str(label)+'\\n')\n",
    "    return all_\n",
    "# 转换成四点坐标\n",
    "def rotatePoint(xc, yc, xp, yp, theta):\n",
    "    xoff = xp - xc;\n",
    "    yoff = yp - yc;\n",
    "    cosTheta = math.cos(theta)\n",
    "    sinTheta = math.sin(theta)\n",
    "    pResx = cosTheta * xoff + sinTheta * yoff\n",
    "    pResy = - sinTheta * xoff + cosTheta * yoff\n",
    "    return str(int(xc + pResx)), str(int(yc + pResy))\n",
    "\n",
    "txt_root = r'D:\\fish\\rotated_fish128\\dota_txt/'\n",
    "xml_root = r'D:\\fish\\rotated_fish128\\xml'\n",
    "for xml in os.listdir(xml_root):\n",
    "    boxes = edit_xml(xml_root+'/'+xml)\n",
    "    with open(txt_root+xml[:-3]+'txt', 'w') as f:\n",
    "        f.write(touwenjian)\n",
    "        for box in boxes:\n",
    "            f.write(str(box))\n",
    "        f.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train and val size 128\n",
      "traub suze 102\n"
     ]
    }
   ],
   "source": [
    "# voc segmentation\n",
    "import os\n",
    "import random \n",
    "random.seed(0)\n",
    "segfilepath=r'D:\\week3\\Week3_mmseg\\mmsegmentation-0.14.0\\tools/data/rs128/SegmentationClass'\n",
    "saveBasePath=r\"D:\\week3\\Week3_mmseg\\mmsegmentation-0.14.0\\tools/data/rs128/ImageSets/Segmentation/\"\n",
    "#----------------------------------------------------------------------#\n",
    "#   想要增加测试集修改trainval_percent\n",
    "#   修改train_percent用于改变验证集的比例\n",
    "#----------------------------------------------------------------------#\n",
    "trainval_percent=1\n",
    "train_percent=0.8\n",
    "\n",
    "temp_seg = os.listdir(segfilepath)\n",
    "total_seg = []\n",
    "for seg in temp_seg:\n",
    "    if seg.endswith(\".png\"):\n",
    "        total_seg.append(seg)\n",
    "\n",
    "num=len(total_seg)  \n",
    "list=range(num)  \n",
    "tv=int(num*trainval_percent)  \n",
    "tr=int(tv*train_percent)  \n",
    "trainval= random.sample(list,tv)  \n",
    "train=random.sample(trainval,tr)  \n",
    " \n",
    "print(\"train and val size\",tv)\n",
    "print(\"traub suze\",tr)\n",
    "ftrainval = open(os.path.join(saveBasePath,'trainval.txt'), 'w')  \n",
    "ftest = open(os.path.join(saveBasePath,'test.txt'), 'w')  \n",
    "ftrain = open(os.path.join(saveBasePath,'train.txt'), 'w')  \n",
    "fval = open(os.path.join(saveBasePath,'val.txt'), 'w')  \n",
    " \n",
    "for i  in list:  \n",
    "    name=total_seg[i][:-4]+'\\n'  \n",
    "    if i in trainval:  \n",
    "        ftrainval.write(name)  \n",
    "        if i in train:  \n",
    "            ftrain.write(name)  \n",
    "        else:  \n",
    "            fval.write(name)  \n",
    "    else:  \n",
    "        ftest.write(name)  \n",
    "\n",
    "ftrainval.close()  \n",
    "ftrain.close()  \n",
    "fval.close()  \n",
    "ftest .close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
     "from PIL import Image\n",
     "import numpy as np\n",
     "# Quick visual check of one segmentation mask. Pixel values are multiplied\n",
     "# by 55 — presumably to stretch small class ids into a visible gray range;\n",
     "# confirm against the actual number of classes.\n",
     "img = Image.fromarray(np.uint8(np.array(Image.open(\\\n",
     "    r\"D:\\week3\\Week3_mmseg\\mmsegmentation-0.14.0\\tools\\data\\rs128\\SegmentationClass\\0000.png\"))*55))\n",
     "img.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Sanity-check the generated COCO json by drawing the first few images\n",
     "# with their bounding boxes and category names.\n",
     "import os \n",
     "from pycocotools.coco import COCO\n",
     "from PIL import Image,ImageDraw\n",
     "import matplotlib.pyplot as plt\n",
     "import matplotlib\n",
     "matplotlib.use('TkAgg')\n",
     "json_path = r\"D:\\fish\\standard_fish128\\json\\data.json\"\n",
     "img_path = r'D:\\fish\\standard_fish128\\img'\n",
     "coco = COCO(annotation_file=json_path)\n",
     "ids = list(sorted(coco.imgs.keys()))\n",
     "print(len(ids))\n",
     "# Map category id -> category name for labeling the drawn boxes.\n",
     "coco_classes = dict([(v['id'],v['name']) for k,v in coco.cats.items()])\n",
     "# Only the first three images are shown.\n",
     "for img_id in ids[:3]:\n",
     "    ann_ids = coco.getAnnIds(imgIds=img_id)\n",
     "    targets = coco.loadAnns(ann_ids)\n",
     "    path = coco.loadImgs(img_id)[0]['file_name']\n",
     "    img = Image.open(os.path.join(img_path,path)).convert('RGB')\n",
     "    draw = ImageDraw.Draw(img)\n",
     "    for target in targets:\n",
     "        # COCO bbox is [x, y, width, height]; convert to corner points.\n",
     "        x,y,w,h = target['bbox']\n",
     "        x1,y1,x2,y2 = x,y,int(x+w),int(y+h)\n",
     "        draw.rectangle((x1,y1,x2,y2))\n",
     "        draw.text((x1,y1),coco_classes[target['category_id']])\n",
     "    plt.imshow(img)\n",
     "    plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
