{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "a917b435",
   "metadata": {},
   "outputs": [],
   "source": [
    "import argparse\n",
    "import os\n",
    "import pickle\n",
    "import warnings\n",
    "import xml.etree.ElementTree as ET\n",
    "from plistlib import loads\n",
    "\n",
    "import cv2\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "import spectral.io.envi as envi\n",
    "import torch\n",
    "from linformer import Linformer\n",
    "from numpy import flip\n",
    "from sklearn import model_selection, svm\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "\n",
    "from AutoGPU import autoGPU\n",
    "from comparetools.global_module.network import CDCNN_network as CDCNN\n",
    "from hongdanfeng.vit_hong import ViT as hongViT\n",
    "from models import (_1DCNN, _2DCNN, _3DCNN, _3DCNN_1DCNN, _3DCNN_AM, PURE1DCNN,\n",
    "                    PURE2DCNN, PURE3DCNN, PURE3DCNN_2AM, SAE, SAE_AM,\n",
    "                    DBDA_network, HamidaEtAl, LeeEtAl, SSRN_network, _2dCNN,\n",
    "                    myknn, mysvm)\n",
    "from NNViT import ViT as NNViT\n",
    "from NViT import ViT as NViT\n",
    "from training_utils import TrainProcess, setup_seed\n",
    "from utils import (DataPreProcess, DataResult, get_imggnd, listen_plot, myplot,\n",
    "                   plot, setpath, splitdata)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "7df8961c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练比例为 训练集0.05 验证集0.01 测试集0.94\n",
      "模型为TransGan\n"
     ]
    }
   ],
   "source": [
    "# --- Experiment configuration for one training trial ---\n",
    "dataset = './pathology/data/032370b-20x-roi2'  # root of the HSI pathology data\n",
    "NTr = 0.05  # training split proportion\n",
    "trialnumber = 1   # repeated-trial index (used when building output paths)\n",
    "NTe = 0.94  # test split proportion\n",
    "NVa = 0.01  # validation split proportion\n",
    "patchsize = 9  # spatial window (9x9) extracted around each pixel\n",
    "modelname = 'TransGan' # model key, looked up in the registry cell below\n",
    "gpu_num = 1  # number of GPUs requested from autoGPU\n",
    "depth = 5  # transformer depth passed to the ViT variants\n",
    "load_bestmodel = 1  # 1 -> resume from bestmodel.pth if present\n",
    "gpu_ids = -1  # -1 -> pick a GPU automatically, otherwise explicit CUDA device id\n",
    "\n",
    "\n",
    "print('训练比例为 训练集{} 验证集{} 测试集{}'.format(NTr, NVa, NTe))\n",
    "print('模型为{}'.format(modelname))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "8b51debe",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Errno 2] No such file or directory: './pathology/032370b/roi2/Split/proportion/Tr_0.05/Va_0.01/Te_0.94/1/result/TransGan/result.pkl'\n"
     ]
    }
   ],
   "source": [
    "# Build per-trial output paths and load the hyperspectral image + ground truth.\n",
    "resultpath, imagepath, datapath = setpath(dataset, trialnumber , NTr, \n",
    "                                                NVa, NTe, modelname)\n",
    "    \n",
    "\n",
    "\n",
    "IMAGE, GND = get_imggnd(dataset)\n",
    "\n",
    "# Best-effort load of a previously saved result object; a missing file is\n",
    "# reported (printed) rather than fatal, so the notebook can continue.\n",
    "try: \n",
    "    with open(resultpath + 'result.pkl', 'rb') as f:\n",
    "        result = pickle.load(f)  # NOTE(review): pickle on a local results file -- trusted source assumed\n",
    "        D = result\n",
    "#         D = DataResult()\n",
    "#         D.y_score = result.y_score\n",
    "#         D.y_true = result.y_true\n",
    "#         D.get_metric()\n",
    "        print(100*D.accuracy_score, 100*D.auc, 100*D.precision, 100*D.recall)\n",
    "except Exception as e:\n",
    "    print(e)  # e.g. FileNotFoundError when this trial has not produced results yet\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "c4efbcc9",
   "metadata": {},
   "outputs": [
    {
     "ename": "AttributeError",
     "evalue": "'DataResult' object has no attribute 'y_true'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-37-82f9d818bfd5>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mresult\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_metric\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m~/Programming/python/pathology/utils.py\u001b[0m in \u001b[0;36mget_metric\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m     46\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     47\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mget_metric\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 48\u001b[0;31m         \u001b[0mfpr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtpr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mthresholds\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmetrics\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mroc_curve\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0my_true\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0my_score\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos_label\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     49\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mauc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmetrics\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mauc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfpr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtpr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     50\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mylabel_pre\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmap\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mround\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0my_score\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mAttributeError\u001b[0m: 'DataResult' object has no attribute 'y_true'"
     ]
    }
   ],
   "source": [
    " result.get_metric()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "2a3e7c06",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Lucky Dog! Patch data already exists!\n",
      "p atch 的尺寸为\n",
      "(65535, 60, 9, 9)\n"
     ]
    }
   ],
   "source": [
    "# Extract per-pixel patches (N, bands, patchsize, patchsize); the preprocessor\n",
    "# reuses patch data already on disk (see the 'already exists' message above).\n",
    "processeddata = DataPreProcess(IMAGE, patchsize, datapath, 1).processeddata\n",
    "print('patch 的尺寸为') \n",
    "print(processeddata['train'].patch.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "9f532aad",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(1258293, 60, 9, 9)"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "processeddata['test'].patch.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "id": "66a798bf",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "已选择第5张卡，型号为TITAN RTX，22610MB/24220MB显存可用\n",
      "读了最佳模型, 继续训练\n",
      "0.9737240874976361 0.9954468536299325 0.9729243757077377 0.9842173695615936\n"
     ]
    }
   ],
   "source": [
    "modelname = 'PURE2DCNN' \n",
    "resultpath, imagepath, datapath = setpath(dataset, trialnumber , NTr, \n",
    "                                                NVa, NTe, modelname)\n",
    "\n",
    "##############################\n",
    "##############################\n",
    "#######################\n",
    "# GPU selection: honour an explicit id when given, otherwise poll autoGPU\n",
    "# until a card with at least 5000 MB free memory can be claimed.\n",
    "if gpu_ids != -1:\n",
    "        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_ids)\n",
    "else:\n",
    "    while True:\n",
    "        try:\n",
    "            autoGPU(gpu_num, 5000)\n",
    "            break\n",
    "        except Exception as e :\n",
    "            print(e)\n",
    "            pass\n",
    "\n",
    "    \n",
    "# NOTE(review): this silently overrides the 'PURE2DCNN' assignment above, so\n",
    "# resultpath (built from the old name) and the model loaded below can\n",
    "# disagree -- confirm this is intentional.\n",
    "modelname = 'NDIS_MODEL' \n",
    "# Registry of available models. Most entries are classes; the last two are\n",
    "# eagerly-built instances, constructed every time this cell runs.\n",
    "model = {\n",
    "        'SAE': SAE,\n",
    "        'PURE1DCNN': PURE1DCNN,\n",
    "        'PURE2DCNN':PURE2DCNN,\n",
    "        'PURE3DCNN': PURE3DCNN,\n",
    "        'DBDA': DBDA_network,\n",
    "        '1DCNN': _1DCNN,\n",
    "        'CDCNN':CDCNN,\n",
    "        'SSRN': SSRN_network,\n",
    "        'TransGan':NViT,\n",
    "        'NDIS_MODEL': NViT,\n",
    "        'NNDIS_MODEL':NNViT,\n",
    "        'DanfengViT': hongViT,\n",
    "        'SimpledanfengViT': hongViT(mode='ViT'),\n",
    "        'NViTBaseline': NViT(num_classes=2, depth=depth)}\n",
    "\n",
    "# Instantiate with default arguments when the registry entry is still a class.\n",
    "model = model[modelname]\n",
    "if isinstance(model, type):\n",
    "    model = model()\n",
    "# spliteddata = splitdata(IMAGE, GND, datapaPUth , trainnum=eval(NTr), validnum=eval(NVa), testnum=eval(NTe))\n",
    "\n",
    "# model = torch.load('pathology/032370b/roi2/Split/proportion/Tr_0.05/Va_0.01/Te_0.94/1/result/TransGan.bak/bestmodel.pth')\n",
    "\n",
    "# Resume from the best checkpoint. The fallback strips a 'module.' prefix from\n",
    "# state-dict keys (presumably left by DataParallel -- confirm).\n",
    "# NOTE(review): the bare except hides any real load error, not just key-name\n",
    "# mismatches.\n",
    "if load_bestmodel:\n",
    "    t_para = torch.load(resultpath + 'bestmodel.pth')\n",
    "    try:\n",
    "        model.load_state_dict(t_para)\n",
    "    except:\n",
    "        model.load_state_dict({k.replace('module.', ''):v for k,v in t_para.items()})\n",
    "    print('读了最佳模型, 继续训练')\n",
    "\n",
    "\n",
    "# Wrap model + data in the training harness, then score the test split.\n",
    "T = TrainProcess(model=model.to('cuda'),\n",
    "                modelname=modelname+str(depth),\n",
    "                processeddata=processeddata,\n",
    "                train_config='./config_normal.yaml',\n",
    "                )\n",
    "\n",
    "T.evaluate(T.test_loader, T.test_result)\n",
    "print(T.test_result.accuracy_score, T.test_result.auc, T.test_result.precision, T.test_result.recall)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "id": "c486b018",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ViT(\n",
       "  (patch_to_embedding): Linear(in_features=81, out_features=64, bias=True)\n",
       "  (dropout): Dropout(p=0.0, inplace=False)\n",
       "  (transformer): Transformer(\n",
       "    (layers): ModuleList(\n",
       "      (0): ModuleList(\n",
       "        (0): Residual(\n",
       "          (fn): PreNorm(\n",
       "            (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
       "            (fn): Attention(\n",
       "              (to_qkv): Linear(in_features=64, out_features=192, bias=False)\n",
       "              (to_out): Sequential(\n",
       "                (0): Linear(in_features=64, out_features=64, bias=True)\n",
       "                (1): Dropout(p=0.0, inplace=False)\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (1): Residual(\n",
       "          (fn): PreNorm(\n",
       "            (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
       "            (fn): FeedForward(\n",
       "              (net): Sequential(\n",
       "                (0): Linear(in_features=64, out_features=8, bias=True)\n",
       "                (1): GELU()\n",
       "                (2): Dropout(p=0.0, inplace=False)\n",
       "                (3): Linear(in_features=8, out_features=64, bias=True)\n",
       "                (4): Dropout(p=0.0, inplace=False)\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (1): ModuleList(\n",
       "        (0): Residual(\n",
       "          (fn): PreNorm(\n",
       "            (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
       "            (fn): Attention(\n",
       "              (to_qkv): Linear(in_features=64, out_features=192, bias=False)\n",
       "              (to_out): Sequential(\n",
       "                (0): Linear(in_features=64, out_features=64, bias=True)\n",
       "                (1): Dropout(p=0.0, inplace=False)\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (1): Residual(\n",
       "          (fn): PreNorm(\n",
       "            (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
       "            (fn): FeedForward(\n",
       "              (net): Sequential(\n",
       "                (0): Linear(in_features=64, out_features=8, bias=True)\n",
       "                (1): GELU()\n",
       "                (2): Dropout(p=0.0, inplace=False)\n",
       "                (3): Linear(in_features=8, out_features=64, bias=True)\n",
       "                (4): Dropout(p=0.0, inplace=False)\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (2): ModuleList(\n",
       "        (0): Residual(\n",
       "          (fn): PreNorm(\n",
       "            (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
       "            (fn): Attention(\n",
       "              (to_qkv): Linear(in_features=64, out_features=192, bias=False)\n",
       "              (to_out): Sequential(\n",
       "                (0): Linear(in_features=64, out_features=64, bias=True)\n",
       "                (1): Dropout(p=0.0, inplace=False)\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (1): Residual(\n",
       "          (fn): PreNorm(\n",
       "            (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
       "            (fn): FeedForward(\n",
       "              (net): Sequential(\n",
       "                (0): Linear(in_features=64, out_features=8, bias=True)\n",
       "                (1): GELU()\n",
       "                (2): Dropout(p=0.0, inplace=False)\n",
       "                (3): Linear(in_features=8, out_features=64, bias=True)\n",
       "                (4): Dropout(p=0.0, inplace=False)\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (3): ModuleList(\n",
       "        (0): Residual(\n",
       "          (fn): PreNorm(\n",
       "            (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
       "            (fn): Attention(\n",
       "              (to_qkv): Linear(in_features=64, out_features=192, bias=False)\n",
       "              (to_out): Sequential(\n",
       "                (0): Linear(in_features=64, out_features=64, bias=True)\n",
       "                (1): Dropout(p=0.0, inplace=False)\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (1): Residual(\n",
       "          (fn): PreNorm(\n",
       "            (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
       "            (fn): FeedForward(\n",
       "              (net): Sequential(\n",
       "                (0): Linear(in_features=64, out_features=8, bias=True)\n",
       "                (1): GELU()\n",
       "                (2): Dropout(p=0.0, inplace=False)\n",
       "                (3): Linear(in_features=8, out_features=64, bias=True)\n",
       "                (4): Dropout(p=0.0, inplace=False)\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (4): ModuleList(\n",
       "        (0): Residual(\n",
       "          (fn): PreNorm(\n",
       "            (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
       "            (fn): Attention(\n",
       "              (to_qkv): Linear(in_features=64, out_features=192, bias=False)\n",
       "              (to_out): Sequential(\n",
       "                (0): Linear(in_features=64, out_features=64, bias=True)\n",
       "                (1): Dropout(p=0.0, inplace=False)\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (1): Residual(\n",
       "          (fn): PreNorm(\n",
       "            (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
       "            (fn): FeedForward(\n",
       "              (net): Sequential(\n",
       "                (0): Linear(in_features=64, out_features=8, bias=True)\n",
       "                (1): GELU()\n",
       "                (2): Dropout(p=0.0, inplace=False)\n",
       "                (3): Linear(in_features=8, out_features=64, bias=True)\n",
       "                (4): Dropout(p=0.0, inplace=False)\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (catchlayer): Sequential(\n",
       "      (0): Residual(\n",
       "        (fn): PreNorm(\n",
       "          (norm): LayerNorm((3904,), eps=1e-05, elementwise_affine=True)\n",
       "          (fn): Attention(\n",
       "            (to_qkv): Linear(in_features=3904, out_features=768, bias=False)\n",
       "            (to_out): Sequential(\n",
       "              (0): Linear(in_features=256, out_features=3904, bias=True)\n",
       "              (1): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (1): Residual(\n",
       "        (fn): PreNorm(\n",
       "          (norm): LayerNorm((3904,), eps=1e-05, elementwise_affine=True)\n",
       "          (fn): FeedForward(\n",
       "            (net): Sequential(\n",
       "              (0): Linear(in_features=3904, out_features=64, bias=True)\n",
       "              (1): GELU()\n",
       "              (2): Dropout(p=0.1, inplace=False)\n",
       "              (3): Linear(in_features=64, out_features=3904, bias=True)\n",
       "              (4): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (conv): Conv2d(5, 1, kernel_size=(1, 1), stride=(1, 1))\n",
       "  )\n",
       "  (to_latent): Identity()\n",
       "  (mlp_head): Sequential(\n",
       "    (0): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
       "    (1): Linear(in_features=64, out_features=3, bias=True)\n",
       "    (2): Sigmoid()\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 41,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f941666b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reassemble a full-size image from the band vector at each test patch's\n",
    "# centre pixel (index 4 of the 9x9 window), placed at its original position.\n",
    "img = np.zeros(IMAGE.shape)\n",
    "for i, pos in enumerate(processeddata['test'].pos):\n",
    "    img[pos[0], pos[1]] = processeddata['test'].patch[i][:,4,4]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1fe32aee",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "plt.imshow(img[:,:,[30,15,10]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d27a66d7",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# NOTE(review): idx is not defined anywhere in this notebook -- this cell only\n",
    "# works after idx is created interactively; define it above or remove the cell.\n",
    "p = processeddata['test'].patch[idx]\n",
    "for i in p[:,:,4,4].sum(axis=0)/p.shape[0]:print(i)  # mean centre-pixel value per band"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0b49ea4c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): a is not defined anywhere in this notebook -- scratch cell\n",
    "# that depends on interactive state; remove or define a above.\n",
    "for i in a:\n",
    "    print(i)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4e6f569f",
   "metadata": {},
   "outputs": [],
   "source": [
    "r = T.model(torch.from_numpy(processeddata['test'].patch[-10:-1]).float().cuda())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e17f9758",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "resultpath + 'bestmodel.pth'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "id": "45ec20f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "NViT = ViT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "id": "03cfe69d",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from einops import rearrange, repeat\n",
    "\n",
    "\n",
    "class Residual(nn.Module):\n",
    "    def __init__(self, fn):\n",
    "        super().__init__()\n",
    "        self.fn = fn\n",
    "    def forward(self, x, **kwargs):\n",
    "        return self.fn(x, **kwargs) + x\n",
    "\n",
    "class PreNorm(nn.Module):\n",
    "    def __init__(self, dim, fn):\n",
    "        super().__init__()\n",
    "        self.norm = nn.LayerNorm(dim)\n",
    "        self.fn = fn\n",
    "    def forward(self, x, **kwargs):\n",
    "        return self.fn(self.norm(x), **kwargs)\n",
    "\n",
    "class FeedForward(nn.Module):\n",
    "    def __init__(self, dim, hidden_dim, dropout = 0.):\n",
    "        super().__init__()\n",
    "        self.net = nn.Sequential(\n",
    "            nn.Linear(dim, hidden_dim),\n",
    "            nn.GELU(),\n",
    "            nn.Dropout(dropout),\n",
    "            nn.Linear(hidden_dim, dim),\n",
    "            nn.Dropout(dropout)\n",
    "        )\n",
    "    def forward(self, x):\n",
    "        return self.net(x)\n",
    "\n",
    "class Attention(nn.Module):\n",
    "    \"\"\"Multi-head self-attention with optional boolean mask.\n",
    "\n",
    "    Args:\n",
    "        dim: input/output embedding dimension.\n",
    "        heads: number of attention heads.\n",
    "        dim_head: per-head dimension.\n",
    "        dropout: dropout probability on the output projection.\n",
    "    \"\"\"\n",
    "    def __init__(self, dim, heads, dim_head, dropout):\n",
    "        super().__init__()\n",
    "        inner_dim = dim_head * heads\n",
    "        self.heads = heads\n",
    "        self.scale = dim_head ** -0.5  # 1/sqrt(dim_head) softmax temperature\n",
    "\n",
    "        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)\n",
    "        self.to_out = nn.Sequential(\n",
    "            nn.Linear(inner_dim, dim),\n",
    "            nn.Dropout(dropout)\n",
    "        )\n",
    "    def forward(self, x, mask = None):\n",
    "        # x:[b,n,dim]\n",
    "        b, n, _, h = *x.shape, self.heads\n",
    "\n",
    "        # get qkv tuple:([b,n,head_num*head_dim],[...],[...])\n",
    "        qkv = self.to_qkv(x).chunk(3, dim = -1)\n",
    "        # split q,k,v from [b,n,head_num*head_dim] -> [b,head_num,n,head_dim]\n",
    "        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)\n",
    "\n",
    "        # transpose(k) * q / sqrt(head_dim) -> [b,head_num,n,n]\n",
    "        dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale\n",
    "        mask_value = -torch.finfo(dots.dtype).max\n",
    "\n",
    "        # mask value: -inf\n",
    "        if mask is not None:\n",
    "            # Bug fix: the original called F.pad but never imported\n",
    "            # torch.nn.functional as F, so any masked call raised NameError.\n",
    "            mask = torch.nn.functional.pad(mask.flatten(1), (1, 0), value = True)\n",
    "            assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions'\n",
    "            mask = mask[:, None, :] * mask[:, :, None]\n",
    "            dots.masked_fill_(~mask, mask_value)\n",
    "            del mask\n",
    "\n",
    "        # softmax normalization -> attention matrix\n",
    "        attn = dots.softmax(dim=-1)\n",
    "        # value * attention matrix -> output\n",
    "        out = torch.einsum('bhij,bhjd->bhid', attn, v)\n",
    "        # cat all output -> [b, n, head_num*head_dim]\n",
    "        out = rearrange(out, 'b h n d -> b n (h d)')\n",
    "        out = self.to_out(out)\n",
    "        return out\n",
    "\n",
    "class Transformer(nn.Module):\n",
    "    \"\"\"Stack of pre-norm attention/FFN layers plus a cross-layer fusion branch.\n",
    "\n",
    "    Per-layer outputs are flattened, attended over by catchlayer, mixed back\n",
    "    to one map with a 1x1 conv, and added to the final layer output.\n",
    "    NOTE(review): 61*dim hardcodes the token count (presumably 60 patches +\n",
    "    1 cls token) -- confirm against the caller before reusing with other sizes.\n",
    "    \"\"\"\n",
    "    def __init__(self, \n",
    "                dim, \n",
    "                depth, \n",
    "                heads, \n",
    "                dim_head, \n",
    "                mlp_head, \n",
    "                dropout, ):\n",
    "        super().__init__()\n",
    "        self.depth = depth\n",
    "        self.layers = nn.ModuleList([])\n",
    "        # Learned position embedding over the depth axis of stacked layer outputs.\n",
    "        self.pos_embedding = nn.Parameter(torch.randn(1, depth, 61*dim))\n",
    "        ##############\n",
    "        for _ in range(depth):\n",
    "            self.layers.append(nn.ModuleList([\n",
    "                Residual(PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout))),\n",
    "                Residual(PreNorm(dim, FeedForward(dim, mlp_head, dropout = dropout)))\n",
    "            ]))\n",
    "        ##############\n",
    "\n",
    "        ####### capture the relations between layers ########\n",
    "        self.catchlayer = nn.Sequential(\n",
    "                Residual(PreNorm(61*dim, Attention(61*dim, heads = 4, dim_head = 64, dropout = 0.1))),\n",
    "                Residual(PreNorm(61*dim, FeedForward(61*dim, 64, dropout = 0.1)))\n",
    "            )\n",
    "        \n",
    "        # 1x1 conv collapsing the depth channels of stacked features to one.\n",
    "        self.conv = nn.Conv2d(depth, 1, 1, stride=1)\n",
    "        # self.alpha = nn.Parameter(torch.randn(1, 61, dim)) \n",
    "        self.alpha = nn.Parameter(torch.randn(1))  # unused in forward (kept from earlier experiments)\n",
    "\n",
    "\n",
    "\n",
    "    def forward(self, x, mask = None):\n",
    "        features = []  # original per-layer features, 64\n",
    "        for attn, ff in self.layers:\n",
    "            x = attn(x) \n",
    "            x = ff(x)\n",
    "            features.append(x)\n",
    "            \n",
    "        _, H, W = x.shape\n",
    "        # Flatten each saved layer output to [b, n*dim] ...\n",
    "        for i in range(self.depth):\n",
    "            features[i] = features[i].flatten(1)\n",
    "\n",
    "        # ... then add a depth axis so they can be stacked as channels.\n",
    "        for i in range(self.depth):\n",
    "            features[i] = features[i].unsqueeze(1)\n",
    "            \n",
    "        FEA = torch.cat(features, dim=1)\n",
    "        FEA = FEA + self.pos_embedding\n",
    "        FEA = self.catchlayer(FEA)\n",
    "        FEA = rearrange(FEA, 'b n (h w) -> b n h w', h=H,w=W)\n",
    "        FEA = self.conv(FEA)\n",
    "        # x = x + torch.mul(self.alpha, FEA.squeeze())\n",
    "        # NOTE(review): .squeeze() also drops the batch dim when batch size is 1 -- verify.\n",
    "        x = x + FEA.squeeze()\n",
    "        # x = x + self.alpha*FEA.squeeze()\n",
    "        return x   # merge features by channel   64 + 6*32=256\n",
    "\n",
    "class ViT(nn.Module):\n",
    "    \"\"\"ViT over per-band patch vectors with two sigmoid output heads.\n",
    "\n",
    "    Each of the num_patches tokens is an image_size**2 * near_band vector;\n",
    "    a cls token is prepended and its transformer output feeds both heads.\n",
    "    NOTE(review): num_classes, pool and mode are accepted but never used in\n",
    "    this implementation -- kept for signature compatibility, confirm.\n",
    "    \"\"\"\n",
    "    def __init__(self, \n",
    "                image_size=9,\n",
    "                near_band=1, \n",
    "                num_patches=60, \n",
    "                num_classes=2, \n",
    "                dim=64, \n",
    "                depth=3, \n",
    "                heads=4, \n",
    "                mlp_dim=8, \n",
    "                pool='cls', \n",
    "                dim_head = 16, \n",
    "                dropout=0., \n",
    "                emb_dropout=0., \n",
    "                mode='CAF'):\n",
    "        super().__init__()\n",
    "\n",
    "        # Flattened length of one token: image_size**2 pixels times near_band.\n",
    "        patch_dim = image_size ** 2 * near_band\n",
    "\n",
    "        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))\n",
    "        self.patch_to_embedding = nn.Linear(patch_dim, dim)\n",
    "        self.cls_token = nn.Parameter(torch.randn(1, 1, dim))\n",
    "\n",
    "        self.dropout = nn.Dropout(emb_dropout)\n",
    "        self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)\n",
    "\n",
    "        self.pool = pool  # stored but unused; forward always takes the cls token\n",
    "        self.to_latent = nn.Identity()\n",
    "\n",
    "        # Two independent 1-unit sigmoid heads sharing the cls representation.\n",
    "        self.mlp_head1 = nn.Sequential(\n",
    "            nn.LayerNorm(dim),\n",
    "            nn.Linear(dim, 1),\n",
    "            nn.Sigmoid()\n",
    "        )\n",
    "\n",
    "        self.mlp_head2 = nn.Sequential(\n",
    "            nn.LayerNorm(dim),\n",
    "            nn.Linear(dim, 1),\n",
    "            nn.Sigmoid()\n",
    "        )\n",
    "\n",
    "\n",
    "    def forward(self, x, mask = None):\n",
    "\n",
    "        # patchs[batch, patch_num, patch_size*patch_size*c]  [batch,200,145*145]\n",
    "        x = rearrange(x, 'b c h w -> b c (h w)')\n",
    "\n",
    "        ## embedding every patch vector to embedding size: [batch, patch_num, embedding_size]\n",
    "        x = self.patch_to_embedding(x) #[b,n,dim]\n",
    "        b, n, _ = x.shape\n",
    "\n",
    "        # add position embedding\n",
    "        cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) #[b,1,dim]\n",
    "        x = torch.cat((cls_tokens, x), dim = 1) #[b,n+1,dim]\n",
    "        x += self.pos_embedding[:, :(n + 1)]\n",
    "        x = self.dropout(x)\n",
    "\n",
    "        # transformer: x[b,n + 1,dim] -> x[b,n + 1,dim]\n",
    "        x = self.transformer(x, mask)\n",
    "\n",
    "        # classification: using cls_token output\n",
    "        x = self.to_latent(x[:,0])\n",
    "\n",
    "        # MLP classification layer: returns a TUPLE of two per-sample sigmoid scores\n",
    "        return self.mlp_head1(x).squeeze(), self.mlp_head2(x).squeeze()\n",
    "        # return self.mlp_head2(x).squeeze()\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ded84588",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
