{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### online_test 脚本\n",
    "这个book脚本，设计目的是在线计算base和sam的分割mIoU，直接进行对比。\n",
    "sam的分割结果不进行输出保存png。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/yangshuo/.conda/envs/datawhale/lib/python3.8/site-packages/mmcv/__init__.py:20: UserWarning: On January 1, 2023, MMCV will release v2.0.0, in which it will remove components related to the training process and add a data transformation module. In addition, it will rename the package names mmcv to mmcv-lite and mmcv-full to mmcv. See https://github.com/open-mmlab/mmcv/blob/master/docs/en/compatibility.md for more details.\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "from mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot\n",
    "from mmseg.core.evaluation import get_palette\n",
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "import numpy as np\n",
    "import cv2\n",
    "from sklearn.cluster import DBSCAN\n",
    "import torch\n",
    "import matplotlib.pyplot as plt\n",
    "import os.path as osp\n",
    "import glob\n",
    "from PIL import Image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def show_mask(mask, ax, random_color=False):\n",
    "    if random_color:\n",
    "        color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)\n",
    "    else:\n",
    "        color = np.array([30/255, 144/255, 255/255, 0.6])\n",
    "    h, w = mask.shape[-2:]\n",
    "    mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)\n",
    "    ax.imshow(mask_image)\n",
    "    \n",
    "def show_points(coords, labels, ax, marker_size=375):\n",
    "    pos_points = coords[labels==1]\n",
    "    neg_points = coords[labels==0]\n",
    "    ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)\n",
    "    ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)   \n",
    "    \n",
    "def show_box(box, ax):\n",
    "    x0, y0 = box[0], box[1]\n",
    "    w, h = box[2] - box[0], box[3] - box[1]\n",
    "    ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2)) \n",
    "    \n",
    "def calculate_iou(gt, pred):\n",
    "    if not gt.any() and  not pred.any() :\n",
    "        return 1\n",
    "    intersection = np.logical_and(gt, pred)\n",
    "    union = np.logical_or(gt, pred)\n",
    "    iou_score = np.sum(intersection) / np.sum(union)\n",
    "    return iou_score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Base model weights selection\n",
    "\n",
    "# # Weak segmentor (currently active)\n",
    "config_file = '/home/yangshuo/past_comp/DPLBV3P/code/x_trick/x_balanced_class.py'\n",
    "checkpoint_file = '/home/yangshuo/past_comp/DPLBV3P/code/x_trick/work_dir/dplab/iter_20000.pth'\n",
    "\n",
    "# Strong segmentor (commented out; swap the paths above to use it)\n",
    "# config_file = '/home/yangshuo/past_comp/DPLBV3P/code/1_SAM/weight/aug_all.py'\n",
    "# checkpoint_file = '/home/yangshuo/past_comp/DPLBV3P/code/1_SAM/weight/iter_40000.pth'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/yangshuo/.conda/envs/datawhale/lib/python3.8/site-packages/mmsegmentation-0.30.0-py3.8.egg/mmseg/models/decode_heads/decode_head.py:94: UserWarning: For binary segmentation, we suggest using`out_channels = 1` to define the outputchannels of segmentor, and use `threshold`to convert seg_logist into a predictionapplying a threshold\n",
      "  warnings.warn('For binary segmentation, we suggest using'\n",
      "/home/yangshuo/.conda/envs/datawhale/lib/python3.8/site-packages/mmsegmentation-0.30.0-py3.8.egg/mmseg/models/losses/cross_entropy_loss.py:235: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "load checkpoint from local path: /home/yangshuo/past_comp/DPLBV3P/code/x_trick/work_dir/dplab/iter_20000.pth\n"
     ]
    }
   ],
   "source": [
    "# Initialize the base segmentor from the config/checkpoint selected above\n",
    "model = init_segmentor(config_file, checkpoint_file, device='cuda:0')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load and initialize SAM (Segment Anything Model)\n",
    "sam_checkpoint = \"/home/yangshuo/past_comp/segment-anything/pretrain/sam_vit_h_4b8939.pth\"\n",
    "device = \"cuda:1\"  # separate GPU from the base segmentor (cuda:0)\n",
    "model_type = \"default\"  # registry key; checkpoint filename suggests the ViT-H variant\n",
    "import sys\n",
    "sys.path.append(\"..\")  # make the local segment_anything package importable\n",
    "from segment_anything import sam_model_registry, SamPredictor\n",
    "\n",
    "sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)\n",
    "sam.to(device=device)\n",
    "\n",
    "predictor = SamPredictor(sam)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "eval_root = '/data/yangshuo/DPLABV3P/Ali_building_2class/eval'  # evaluation images\n",
    "eval_gt = '/data/yangshuo/DPLABV3P/Ali_building_2class/train_gt_0_1/'  # ground-truth masks (dir name suggests 0/1 labels)\n",
    "images = glob.glob(eval_root + '/*.jpg')  # collect all jpg images"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "dbscan = DBSCAN(eps=11, min_samples=300) # 根据实际情况调整参数 eps 和 min_samples"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Running totals for mIoU computation\n",
    "base_miou_sum = 0  # sum of per-image IoU for the base segmentor\n",
    "num_images = 0     # number of images actually evaluated\n",
    "sam_miou_sum = 0   # sum of per-image IoU for SAM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  4%|▍         | 116/3000 [01:54<47:27,  1.01it/s]  \n"
     ]
    }
   ],
   "source": [
    "# Main online-evaluation loop: per image, score the base segmentor, build SAM\n",
    "# prompts from its prediction, score SAM, and accumulate both IoU sums.\n",
    "for idx, name in enumerate(tqdm(images)):\n",
    "    \n",
    "    is_exist = True  # whether the base model predicts any building in this image\n",
    "    \n",
    "    file_name_gt = name.split('/')[-1].split('.')[0]  # image filename stem\n",
    "    gt = np.array(Image.open(eval_gt + file_name_gt + '.png'))  # ground-truth mask\n",
    "    \n",
    "    if not gt.any():\n",
    "        continue  # NOTE(review): skipping empty-GT images biases the reported mIoU sample\n",
    "    \n",
    "    image = cv2.imread(name)\n",
    "    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; convert for SAM\n",
    "    predictor.set_image(image)  # register the image with SAM's predictor\n",
    "    \n",
    "    \n",
    "#######################################################################################################\n",
    "#######################################################################################################\n",
    "#     # Visualize the original image\n",
    "#     plt.figure(figsize=(10,10))\n",
    "#     plt.imshow(image)\n",
    "#     plt.axis('on')\n",
    "#     plt.show()\n",
    "#######################################################################################################\n",
    "#######################################################################################################\n",
    "    # Get the base model's initial prediction\n",
    "    result = inference_segmentor(model, name)  \n",
    "    img = np.array(result[0])  # predicted label map\n",
    "    \n",
    "#######################################################################################################\n",
    "#######################################################################################################\n",
    "    # Visualize the initial prediction\n",
    "#     show_result_pyplot(model, image, result, [[0,0,0],[255,255,255]])\n",
    "#######################################################################################################\n",
    "#######################################################################################################\n",
    "\n",
    "      \n",
    "    # Accumulate the base prediction's IoU (the absolute reference value)\n",
    "    \n",
    "    iou_score = calculate_iou(gt , img)\n",
    "    base_miou_sum += iou_score\n",
    "#     print('base:' , iou_score)\n",
    "    \n",
    "    \n",
    "    \n",
    "    \n",
    "    prospect_idx = np.where(img == 1)   # foreground (building) pixel indices\n",
    "    background_idx = np.where(img == 0 )   # background pixel indices\n",
    "    prospect_coords = np.column_stack((prospect_idx[1], prospect_idx[0]))  # foreground (x, y) coordinates\n",
    "    if len(prospect_coords) == 0 : \n",
    "        is_exist = False  # base segmentor sees no buildings in this image\n",
    "    \n",
    "\n",
    "    background_coords = np.column_stack((background_idx[1], background_idx[0]))  # background (x, y) coordinates\n",
    "\n",
    "    \n",
    "########################################################################################################\n",
    "    # Prompt-construction strategy below\n",
    "    \n",
    "    # Randomly pick background pixels as negative prompts.\n",
    "    # NOTE(review): original comment said 30, but size=20 is used; assumes every\n",
    "    # image has at least that many background pixels.\n",
    "    indices = np.random.choice(np.arange(len(background_coords)), size=20, replace=False)  \n",
    "    background_coords = background_coords[indices]\n",
    "    \n",
    "    # To strengthen SAM, also pick 30 random foreground coordinates (with replacement)\n",
    "    random_prospect_coords = None\n",
    "    representatives = np.array([])\n",
    "    if is_exist:\n",
    "        random_prospect_coords = prospect_coords[np.random.choice(np.arange(len(prospect_coords)), size=30, replace=True)]\n",
    "        # Cluster foreground pixels with DBSCAN\n",
    "        dbscan.fit(prospect_coords)  # obtain instance representatives\n",
    "        # Compute each cluster's center coordinate\n",
    "        unique_labels = set(dbscan.labels_)\n",
    "        centers = []\n",
    "        for label in unique_labels:\n",
    "            if label == -1: # skip noise points\n",
    "                continue\n",
    "            class_member_mask = (dbscan.labels_ == label)\n",
    "            center = np.average(prospect_coords[class_member_mask], axis=0)\n",
    "            centers.append(center.astype(int))\n",
    "\n",
    "            # NOTE(review): rebuilt on every loop iteration; only the final value is used\n",
    "            representatives = np.array(centers)\n",
    "\n",
    "    if representatives.shape == (0,) : # no cluster representative could be selected\n",
    "        is_exist = False \n",
    "#         representatives = random_prospect_coords\n",
    "        \n",
    "    ###################################################################################################\n",
    "    # SAM segmentation stage\n",
    "    masks = None\n",
    "    if is_exist :\n",
    "        # Concatenate all prompt points\n",
    "        input_point = np.concatenate((random_prospect_coords , representatives , background_coords) , axis = 0 )\n",
    "#         input_point = np.concatenate((random_prospect_coords , representatives) , axis = 0 )\n",
    "    \n",
    "    \n",
    "        # Concatenate the prompt labels\n",
    "        prospect_label = np.ones(len(random_prospect_coords) + len(representatives))  # foreground\n",
    "       \n",
    "        \n",
    "        background_label = np.zeros(len(background_coords))\n",
    "        input_label = np.append(prospect_label , background_label)\n",
    "#         input_label = prospect_label\n",
    "    \n",
    "\n",
    "        masks, scores, logits = predictor.predict(\n",
    "            point_coords=input_point,\n",
    "            point_labels=input_label,\n",
    "            multimask_output=False\n",
    "        )\n",
    "        mask_input = logits[np.argmax(scores), :, :]  # Choose the model's best mask\n",
    "        # Second pass: refine by feeding back the best low-res mask from the first pass\n",
    "        masks, _, _ = predictor.predict(\n",
    "            point_coords=input_point,\n",
    "            point_labels=input_label,\n",
    "            mask_input=mask_input[None, :, :],\n",
    "            multimask_output=False\n",
    "        )\n",
    "    else :  # no buildings predicted\n",
    "        # \n",
    "        masks = np.zeros([512,512])  # NOTE(review): hard-coded 512x512; assumes all eval images are this size\n",
    "\n",
    "##########################################################################################################\n",
    "    \n",
    "  \n",
    "    # Accumulate SAM's IoU\n",
    "    iou_score = calculate_iou(gt , masks)\n",
    "    sam_miou_sum += iou_score\n",
    "    num_images += 1\n",
    "#     print('sam' , iou_score)\n",
    "\n",
    "    \n",
    "    ########################################\n",
    "    #######################################################################################################\n",
    "    # SAM visualization\n",
    "#     plt.figure(figsize=(10,10))\n",
    "#     plt.imshow(image)\n",
    "#     show_mask(masks, plt.gca())\n",
    "#     show_points(input_point, input_label, plt.gca())\n",
    "#     plt.title(f\"Mask, Score\", fontsize=18)\n",
    "#     plt.axis('off')\n",
    "#     plt.show()  \n",
    "    #######################################################################################################\n",
    "    \n",
    "    if num_images >= 99 : break  # stop after 99 evaluated images\n",
    "#     if idx >=30 : break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.4240273073399286"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Mean IoU of the base segmentor over the evaluated subset\n",
    "base_mIoU = base_miou_sum / num_images\n",
    "base_mIoU"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.43797600157145294"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Mean IoU of the SAM-refined masks over the same subset\n",
    "sam_mIoU = sam_miou_sum / num_images\n",
    "sam_mIoU"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 155,
   "metadata": {},
   "outputs": [],
   "source": [
    "############################  DONE HERE  ###############################################"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# discard\n",
    "# test_root = '/data/yangshuo/DPLABV3P/Ali_building_2class/test_a/'\n",
    "# test_mask = pd.read_csv('/data/yangshuo/DPLABV3P/Ali_building_2class/test_a_samplesubmit.csv', sep='\\t', names=['name', 'mask'])\n",
    "# test_mask['name'] = test_mask['name'].apply(lambda x: test_root + x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# backup\n",
    "# for idx, name in enumerate(tqdm(images)):\n",
    "    \n",
    "#     is_exist = True \n",
    "    \n",
    "#     file_name_gt = name.split('/')[-1].split('.')[0]  \n",
    "#     gt = np.array(Image.open(eval_gt + file_name_gt + '.png'))\n",
    "    \n",
    "#     if not gt.any():\n",
    "#         continue \n",
    "    \n",
    "#     image = cv2.imread(name)\n",
    "#     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
    "#     predictor.set_image(image)\n",
    "    \n",
    "    \n",
    "# #######################################################################################################\n",
    "# #######################################################################################################\n",
    "# #     # 原始图片可视化\n",
    "# #     plt.figure(figsize=(10,10))\n",
    "# #     plt.imshow(image)\n",
    "# #     plt.axis('on')\n",
    "# #     plt.show()\n",
    "# #######################################################################################################\n",
    "# #######################################################################################################\n",
    "#     # 获得初始推理\n",
    "#     result = inference_segmentor(model, name)  \n",
    "#     img = np.array(result[0])\n",
    "    \n",
    "# #######################################################################################################\n",
    "# #######################################################################################################\n",
    "#     #初始推理可视化\n",
    "# #     show_result_pyplot(model, image, result, [[0,0,0],[255,255,255]])\n",
    "# #######################################################################################################\n",
    "# #######################################################################################################\n",
    "\n",
    "      \n",
    "#     # 计算初步推理的miou，这个值是绝对的\n",
    "    \n",
    "#     iou_score = calculate_iou(gt , img)\n",
    "#     base_miou_sum += iou_score\n",
    "# #     print('base:' , iou_score)\n",
    "    \n",
    "    \n",
    "    \n",
    "    \n",
    "#     prospect_idx = np.where(img == 1)   # 拿到前景坐标\n",
    "#     background_idx = np.where(img == 0 )   # 拿到背景坐标\n",
    "#     prospect_coords = np.column_stack((prospect_idx[1], prospect_idx[0]))  # 获取前景的坐标\n",
    "#     if len(prospect_coords) == 0 : \n",
    "#         is_exist = False  # 如果基础分割器认为当前图片没有建筑\n",
    "    \n",
    "\n",
    "#     background_coords = np.column_stack((background_idx[1], background_idx[0]))  # 获取背景的坐标\n",
    "\n",
    "    \n",
    "# ########################################################################################################\n",
    "#     # 下面是prompt策略\n",
    "    \n",
    "#     # 对背景随机挑选三十个像素坐标，我们默认所有图片的背景像素个数都在30个以上\n",
    "#     indices = np.random.choice(np.arange(len(background_coords)), size=20, replace=False)  \n",
    "#     background_coords = background_coords[indices]\n",
    "    \n",
    "#     # 为了增强sam的分割效果，我们对前景的所有坐标随机挑选30个坐标，我们默认如果当前图片有建筑的情况下，像素坐标有30个以上\n",
    "#     random_prospect_coords = None\n",
    "#     representatives = np.array([])\n",
    "#     if is_exist:\n",
    "#         random_prospect_coords = prospect_coords[np.random.choice(np.arange(len(prospect_coords)), size=60, replace=True)]\n",
    "#         # DBSCAN做聚类\n",
    "#         dbscan.fit(prospect_coords)  # 获取实例坐标代表\n",
    "#         # 获取聚类后的中心点坐标\n",
    "#         unique_labels = set(dbscan.labels_)\n",
    "#         centers = []\n",
    "#         for label in unique_labels:\n",
    "#             if label == -1: # 跳过噪声点\n",
    "#                 continue\n",
    "#             class_member_mask = (dbscan.labels_ == label)\n",
    "#             center = np.average(prospect_coords[class_member_mask], axis=0)\n",
    "#             centers.append(center.astype(int))\n",
    "\n",
    "#             # 返回每个聚类中心的坐标\n",
    "#             representatives = np.array(centers)\n",
    "\n",
    "#     if representatives.shape == (0,) : # 选不出代表\n",
    "# #         is_exist = False \n",
    "#         representatives = random_prospect_coords\n",
    "        \n",
    "#     ###################################################################################################\n",
    "#     # SAM分割阶段\n",
    "#     masks = None\n",
    "#     if is_exist :\n",
    "#         # prompt点进行拼接\n",
    "#         input_point = np.concatenate((random_prospect_coords , representatives , background_coords) , axis = 0 )\n",
    "\n",
    "#         # label拼接\n",
    "#         prospect_label = np.ones(len(random_prospect_coords) + len(representatives))  # 前景\n",
    "#         background_label = np.zeros(len(background_coords))\n",
    "#         input_label = np.append(prospect_label , background_label)\n",
    "\n",
    "\n",
    "#         masks, scores, logits = predictor.predict(\n",
    "#             point_coords=input_point,\n",
    "#             point_labels=input_label,\n",
    "#             multimask_output=False\n",
    "#         )\n",
    "#         mask_input = logits[np.argmax(scores), :, :]  # Choose the model's best mask\n",
    "#         masks, _, _ = predictor.predict(\n",
    "#             point_coords=input_point,\n",
    "#             point_labels=input_label,\n",
    "#             mask_input=mask_input[None, :, :],\n",
    "#             multimask_output=False\n",
    "#         )\n",
    "#     else :  # 不存在建筑\n",
    "#         # \n",
    "#         masks = np.zeros([512,512])\n",
    "\n",
    "# ##########################################################################################################\n",
    "    \n",
    "  \n",
    "#     # 计算sam的miou\n",
    "#     iou_score = calculate_iou(gt , masks)\n",
    "#     sam_miou_sum += iou_score\n",
    "#     num_images += 1\n",
    "# #     print('sam' , iou_score)\n",
    "\n",
    "    \n",
    "#     ########################################\n",
    "#     #######################################################################################################\n",
    "#     # SAM 可视化\n",
    "# #     plt.figure(figsize=(10,10))\n",
    "# #     plt.imshow(image)\n",
    "# #     show_mask(masks, plt.gca())\n",
    "# #     show_points(input_point, input_label, plt.gca())\n",
    "# #     plt.title(f\"Mask, Score\", fontsize=18)\n",
    "# #     plt.axis('off')\n",
    "# #     plt.show()  \n",
    "#     #######################################################################################################\n",
    "    \n",
    "#     if num_images >= 99 : break \n",
    "# #     if idx >=30 : break"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
