{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import random\n",
    "\n",
    "import numpy as np\n",
     "# load PaddlePaddle and related libraries\n",
    "import paddle\n",
    "import paddle.fluid as fluid\n",
    "\n",
    "from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear\n",
    "from pydicom import dcmread\n",
    "from PIL import Image\n",
    "\n",
    "from rich import print\n",
    "from rich.progress import Progress\n",
    "\n",
    "from skimage.transform import resize"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 准备data loader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">Total num of LUAD: <span style=\"color: #000080; font-weight: bold\">46787</span>; LUSC: <span style=\"color: #000080; font-weight: bold\">34570</span>\n",
       "</pre>\n"
      ],
      "text/plain": [
       "<rich.jupyter.JupyterRenderable at 0x7f5653c8a2e0>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "def get_all_dcm_files(input_dir: str):\n",
    "    u\"\"\"\n",
     "    os.walk to get all files with suffix .dcm\n",
    "    \"\"\"\n",
    "    fs = []\n",
    "    \n",
    "    for parent, _, files in os.walk(input_dir):\n",
    "        for f in files:\n",
    "            if f.endswith(\".dcm\"):\n",
    "                fs.append(os.path.join(parent, f))\n",
    "    return fs\n",
    "\n",
    "LUAD = get_all_dcm_files(\"data/Lung_CT/TCGA-LUAD\")\n",
    "LUSC = get_all_dcm_files(\"data/Lung_CT/TCGA-LUSC\")\n",
    "\n",
    "print(f\"Total num of LUAD: {len(LUAD)}; LUSC: {len(LUSC)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"font-weight: bold\">(</span><span style=\"color: #000080; font-weight: bold\">512</span>, <span style=\"color: #000080; font-weight: bold\">512</span><span style=\"font-weight: bold\">)</span>\n",
       "</pre>\n"
      ],
      "text/plain": [
       "<rich.jupyter.JupyterRenderable at 0x7f5653bf8f70>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "ds = dcmread(LUAD[0])\n",
    "\n",
    "print(ds.pixel_array.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# data generater loader\n",
    "CT = [[x, \"LUAD\"] for x in sorted(LUAD)]\n",
    "CT +=[[x, \"LUSC\"] for x in sorted(LUSC)]\n",
    "\n",
    "onehot = {\n",
    "    \"LUAD\": 0,\n",
    "    \"LUSC\": 1\n",
    "}\n",
    "\n",
    "random.seed(42)\n",
    "random.shuffle(CT)\n",
    "\n",
    "def data_generator(data, batch_size:int=64, image_size:int = 512):\n",
    "    u\"\"\"\n",
    "    read dcm file and generate the image and label\n",
    "    \"\"\"\n",
    "\n",
    "    i = 0\n",
    "    res = []\n",
    "    while i < len(data):\n",
    "        img, label = data[i]\n",
    "        try:\n",
    "            img = dcmread(img)\n",
    "            \n",
    "            img = img.pixel_array\n",
    "            \n",
    "            if any([x != image_size for x in img.shape]):\n",
    "                img = resize(img, (image_size, image_size), anti_aliasing=True)\n",
    "            \n",
    "            res.append([img.astype(\"float32\"), onehot[label]])\n",
    "\n",
    "            if len(res) >= batch_size:\n",
    "                yield res\n",
    "                res = []\n",
    "        except Exception as err:\n",
    "            print(err)\n",
    "            pass\n",
    "        finally:\n",
    "            i += 1\n",
    "       \n",
     "    # Yield the trailing partial batch. The old `len(res) == batch_size`\n",
     "    # check could never be true here, because full batches are already\n",
     "    # yielded (and res reset) inside the loop, so the last partial batch\n",
     "    # was silently dropped.\n",
     "    if res:\n",
     "        yield res\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义mnist数据识别网络结构，同房价预测网络\n",
    "class MNIST(fluid.dygraph.Layer):\n",
    "    def __init__(self):\n",
    "        super(MNIST, self).__init__()\n",
    "        \n",
    "        # 定义一层全连接层，输出维度是1，激活函数为None，即不使用激活函数\n",
    "        self.fc = Linear(input_dim=512 * 512, output_dim=1, act=None)\n",
    "        \n",
    "    # 定义网络结构的前向计算过程\n",
    "    def forward(self, inputs):\n",
    "        outputs = self.fc(inputs)\n",
    "        return outputs\n",
    "\n",
    "\n",
    "# 通过with语句创建一个dygraph运行的context\n",
    "# 动态图下的一些操作需要在guard下进行\n",
    "with fluid.dygraph.guard():\n",
    "    model = MNIST()\n",
    "    model.train()\n",
     "    train_loader = data_generator(CT[:10000], batch_size=16)\n",
    "    optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.001, parameter_list=model.parameters())\n",
    "    EPOCH_NUM = 10\n",
    "    for epoch_id in range(EPOCH_NUM):\n",
    "        for batch_id, data in enumerate(train_loader):\n",
    "            #准备数据，格式需要转换成符合框架要求\n",
    "            try:\n",
     "                image_data = np.array([x[0].flatten() for x in data]).astype(\"float32\")\n",
    "                label_data = np.array([x[1] for x in data]).astype('float32').reshape(-1, 1)\n",
    "                # 将数据转为飞桨动态图格式\n",
    "                image = fluid.dygraph.to_variable(image_data)\n",
    "                label = fluid.dygraph.to_variable(label_data)\n",
    "\n",
    "                #前向计算的过程\n",
    "                predict = model(image)\n",
    "\n",
    "                #计算损失，取一个批次样本损失的平均值\n",
    "                loss = fluid.layers.square_error_cost(predict, label)\n",
    "                avg_loss = fluid.layers.mean(loss)\n",
    "\n",
     "                # print the current loss every 1000 batches\n",
     "                if batch_id % 1000 == 0:\n",
    "                    print(f\"epoch: {epoch_id}, batch: {batch_id}, loss is: {avg_loss.numpy()}\")\n",
    "\n",
    "                #后向传播，更新参数的过程\n",
    "                avg_loss.backward()\n",
    "                optimizer.minimize(avg_loss)\n",
    "                model.clear_gradients()\n",
    "            except Exception as err:\n",
    "                print(err)\n",
    "                break\n",
    "\n",
    "    # 保存模型\n",
    "    # fluid.save_dygraph(model.state_dict(), 'mnist')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义 LeNet 网络结构\n",
    "class LeNet(fluid.dygraph.Layer):\n",
    "    def __init__(self, num_classes=1):\n",
    "        super(LeNet, self).__init__()\n",
    "\n",
    "        # 创建卷积和池化层块，每个卷积层使用Sigmoid激活函数，后面跟着一个2x2的池化\n",
    "        self.conv1 = Conv2D(num_channels=1, num_filters=6, filter_size=5, act='sigmoid')\n",
    "        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')\n",
    "        self.conv2 = Conv2D(num_channels=6, num_filters=16, filter_size=5, act='sigmoid')\n",
    "        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')\n",
    "        # 创建第3个卷积层\n",
    "        self.conv3 = Conv2D(num_channels=16, num_filters=120, filter_size=4, act='sigmoid')\n",
    "        # 创建全连接层，第一个全连接层的输出神经元个数为64， 第二个全连接层输出神经元个数为分类标签的类别数\n",
     "        # flatten size for a 512x512 input: conv5 -> 508, pool -> 254,\n",
     "        # conv5 -> 250, pool -> 125, conv4 -> 122, with 120 channels,\n",
     "        # i.e. 120*122*122 (512*512 would raise a shape-mismatch at runtime)\n",
     "        self.fc1 = Linear(input_dim=120 * 122 * 122, output_dim=64, act='sigmoid')\n",
    "        self.fc2 = Linear(input_dim=64, output_dim=num_classes)\n",
    "    # 网络的前向计算过程\n",
    "    def forward(self, x):\n",
    "        x = self.conv1(x)\n",
    "        x = self.pool1(x)\n",
    "        x = self.conv2(x)\n",
    "        x = self.pool2(x)\n",
    "        x = self.conv3(x)\n",
    "        x = fluid.layers.reshape(x, [x.shape[0], -1])\n",
    "        x = self.fc1(x)\n",
    "        x = self.fc2(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义训练过程\n",
    "def train(model):\n",
    "    print('start training ... ')\n",
    "    model.train()\n",
    "    epoch_num = 5\n",
    "    opt = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9, parameter_list=model.parameters())\n",
    "    # 使用Paddle自带的数据读取器\n",
     "    train_loader = data_generator(CT[:10000], batch_size=16)\n",
     "    valid_loader = data_generator(CT[10000:20000], batch_size=16)\n",
    "    for epoch in range(epoch_num):\n",
    "        for batch_id, data in enumerate(train_loader):\n",
    "            try:\n",
    "                # 调整输入数据形状和类型\n",
    "                x_data = np.array([item[0] for item in data], dtype='float32').reshape(-1, 1, 512, 512)\n",
    "                y_data = np.array([item[1] for item in data], dtype='int64').reshape(-1, 1)\n",
    "                # 将numpy.ndarray转化成Tensor\n",
    "                img = fluid.dygraph.to_variable(x_data)\n",
    "                label = fluid.dygraph.to_variable(y_data)\n",
    "                # 计算模型输出\n",
    "                logits = model(img)\n",
    "                # 计算损失函数\n",
    "                loss = fluid.layers.softmax_with_cross_entropy(logits, label)\n",
    "                avg_loss = fluid.layers.mean(loss)\n",
    "                if batch_id % 1000 == 0:\n",
    "                    print(\"epoch: {}, batch_id: {}, loss is: {}\".format(epoch, batch_id, avg_loss.numpy()))\n",
    "                avg_loss.backward()\n",
    "                opt.minimize(avg_loss)\n",
    "                model.clear_gradients()\n",
    "            except Exception as err:\n",
    "                print(err)\n",
    "                break\n",
    "            \n",
    "\n",
    "        model.eval()\n",
    "        accuracies = []\n",
    "        losses = []\n",
    "        for batch_id, data in enumerate(valid_loader):\n",
    "            try:\n",
    "                # 调整输入数据形状和类型\n",
     "                # match the 512x512 training input shape (28x28 was MNIST leftover)\n",
     "                x_data = np.array([item[0] for item in data], dtype='float32').reshape(-1, 1, 512, 512)\n",
    "                y_data = np.array([item[1] for item in data], dtype='int64').reshape(-1, 1)\n",
    "                # 将numpy.ndarray转化成Tensor\n",
    "                img = fluid.dygraph.to_variable(x_data)\n",
    "                label = fluid.dygraph.to_variable(y_data)\n",
    "                # 计算模型输出\n",
    "                logits = model(img)\n",
    "                pred = fluid.layers.softmax(logits)\n",
    "                # 计算损失函数\n",
    "                loss = fluid.layers.softmax_with_cross_entropy(logits, label)\n",
    "                acc = fluid.layers.accuracy(pred, label)\n",
    "                accuracies.append(acc.numpy())\n",
    "                losses.append(loss.numpy())\n",
    "            except Exception as err:\n",
    "                print(err)\n",
    "                break\n",
    "        print(\"[validation] accuracy/loss: {}/{}\".format(np.mean(accuracies), np.mean(losses)))\n",
    "        model.train()\n",
    "\n",
    "    # 保存模型参数\n",
    "    # fluid.save_dygraph(model.state_dict(), 'mnist')\n",
    "    \n",
     "# two classes (LUAD=0, LUSC=1); with num_classes=1 softmax_with_cross_entropy rejects label 1\n",
     "train(LeNet(num_classes=2))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## PyTorch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/mnt/data8/zhangyiming/.env/pyenv/versions/3.8.1/lib/python3.8/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n",
      "  and should_run_async(code)\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import pickle\n",
    "import random\n",
    "\n",
    "from multiprocessing import Pool\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import torch.utils as utils\n",
    "\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "import torchvision.models as models\n",
    "\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "\n",
    "from pydicom import dcmread\n",
    "from pydicom.pixel_data_handlers.util import apply_color_lut\n",
    "\n",
    "from PIL import Image\n",
    "\n",
    "from rich import print\n",
    "from rich.progress import Progress\n",
    "\n",
    "from skimage.transform import resize\n",
    "\n",
    "\n",
    "%matplotlib inline\n",
    "\n",
    "\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/mnt/data8/zhangyiming/.env/pyenv/versions/3.8.1/lib/python3.8/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n",
      "  and should_run_async(code)\n"
     ]
    }
   ],
   "source": [
    "class CT(torch.utils.data.Dataset):\n",
    "    def __init__(self, LUAD: str = \"../data/Lung_CT/TCGA-LUAD\", LUSC: str = \"../data/Lung_CT/TCGA-LUSC\", n_jobs: int = 10, skip_valid: bool = False, seed: int=42, to_gray: bool = True):\n",
    "        super(CT).__init__()\n",
    "        \n",
    "        self.imgs = [[os.path.abspath(x), 0] for x in self.get_all_dcm_files(LUAD, n_jobs, skip_valid)]\n",
    "        self.imgs += [[os.path.abspath(x), 1] for x in self.get_all_dcm_files(LUSC, n_jobs, skip_valid)]\n",
    "        self.imgs = sorted(self.imgs, key=lambda x:x[0])\n",
    "        \n",
    "        random.seed(seed)\n",
    "        random.shuffle(self.imgs)\n",
    "        \n",
    "        self.to_gray = to_gray\n",
    "        \n",
    "        self.transform = transforms.Compose([\n",
    "            transforms.Resize(256),\n",
    "            transforms.CenterCrop(256),\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize(0.5,  0.5)\n",
    "        ])\n",
    "    \n",
    "    @classmethod\n",
    "    def get_PIL_image(cls, dataset):\n",
    "        \"\"\"Get Image object from Python Imaging Library(PIL)\"\"\"\n",
    "        \n",
    "        def get_LUT_value(data, window, level):\n",
    "            \"\"\"Apply the RGB Look-Up Table for the given\n",
    "               data and window/level value.\"\"\"\n",
    "\n",
    "\n",
    "            return np.piecewise(data,\n",
    "                                [data <= (level - 0.5 - (window - 1) / 2),\n",
    "                                 data > (level - 0.5 + (window - 1) / 2)],\n",
    "                                [0, 255, lambda data: ((data - (level - 0.5)) /\n",
    "                                 (window - 1) + 0.5) * (255 - 0)])\n",
    "\n",
    "        if ('PixelData' not in dataset):\n",
    "            raise TypeError(\"Cannot show image -- DICOM dataset does not have \"\n",
    "                            \"pixel data\")\n",
    "        # can only apply LUT if these window info exists\n",
    "        if ('WindowWidth' not in dataset) or ('WindowCenter' not in dataset):\n",
    "            bits = dataset.BitsAllocated\n",
    "            samples = dataset.SamplesPerPixel\n",
    "            if bits == 8 and samples == 1:\n",
    "                mode = \"L\"\n",
    "            elif bits == 8 and samples == 3:\n",
    "                mode = \"RGB\"\n",
    "            elif bits == 16:\n",
    "                # not sure about this -- PIL source says is 'experimental'\n",
    "                # and no documentation. Also, should bytes swap depending\n",
    "                # on endian of file and system??\n",
    "                mode = \"I;16\"\n",
    "            else:\n",
    "                raise TypeError(\"Don't know PIL mode for %d BitsAllocated \"\n",
    "                                \"and %d SamplesPerPixel\" % (bits, samples))\n",
    "            \n",
    "            print(mode)\n",
    "            # PIL size = (width, height)\n",
    "            size = (dataset.Columns, dataset.Rows)\n",
    "\n",
    "            # Recommended to specify all details\n",
    "            # by http://www.pythonware.com/library/pil/handbook/image.htm\n",
    "            im = Image.frombuffer(mode, size, dataset.PixelData,\n",
    "                                      \"raw\", mode, 0, 1)\n",
    "        else:\n",
    "            ew = dataset['WindowWidth']\n",
    "            ec = dataset['WindowCenter']\n",
    "            ww = int(ew.value[0] if ew.VM > 1 else ew.value)\n",
    "            wc = int(ec.value[0] if ec.VM > 1 else ec.value)\n",
    "            image = get_LUT_value(dataset.pixel_array, ww, wc)\n",
    "            # Convert mode to L since LUT has only 256 values:\n",
    "            #   http://www.pythonware.com/library/pil/handbook/image.htm\n",
    "            im = Image.fromarray(image).convert('L')\n",
    "\n",
    "        return im\n",
    "   \n",
    "    @classmethod\n",
    "    def valid_dcm_file(cls, path: str):\n",
    "        try:\n",
    "            ds = dcmread(path)\n",
    "            ds = Image.fromarray(ds.pixel_array)\n",
    "            return path\n",
    "        except Exception as err:\n",
    "            print(err)\n",
    "            if os.path.exists(path):\n",
    "                os.remove(path)\n",
    "            return None\n",
    "        \n",
    "    @classmethod\n",
    "    def get_all_dcm_files(cls, input_dir: str, n_jobs: int, skip_valid: bool):\n",
    "        u\"\"\"\n",
     "        os.walk to get all files with suffix .dcm\n",
    "        \"\"\"\n",
    "        fs = []\n",
    "    \n",
    "        for parent, _, files in os.walk(input_dir):\n",
    "            for f in files:\n",
    "                if f.endswith(\".dcm\"):\n",
    "                    f = os.path.join(parent, f)\n",
    "                    fs.append(f)\n",
    "        \n",
    "        if skip_valid:\n",
    "            return fs\n",
    "        \n",
    "        with Pool(n_jobs) as p:\n",
    "            data = list(p.map(cls.valid_dcm_file, fs))\n",
    "        \n",
    "        return [x for x in data if x]\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.imgs)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        ds = dcmread(self.imgs[index][0])\n",
    "        \n",
    "        try:\n",
    "            ds = Image.fromarray(ds.pixel_array)\n",
    "        except Exception:\n",
    "            ds = Image.fromarray((ds.pixel_array * 255).astype(np.uint8))\n",
    "            \n",
    "        if self.to_gray:\n",
    "            ds = ds.convert(\"L\")\n",
    "        \n",
    "        return self.transform(ds), self.imgs[index][1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_loader = utils.data.DataLoader(CT(skip_valid=True), batch_size = 10, shuffle = False) \n",
    "\n",
    "# for i, j in train_loader:\n",
    "#     print(i)\n",
    "#     print(j)\n",
    "#     break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 254,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Net(\n",
       "  (conv1): Conv2d(1, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "  (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "  (fc1): Linear(in_features=262144, out_features=128, bias=True)\n",
       "  (fc2): Linear(in_features=128, out_features=10, bias=True)\n",
       "  (dropout): Dropout(p=0.5, inplace=False)\n",
       "  (relu): ReLU()\n",
       ")"
      ]
     },
     "execution_count": 254,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "class Net(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Net, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, padding=1)  # notice the padding\n",
    "        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1) # again...\n",
    "        self.pool = nn.MaxPool2d(2,2)\n",
    "        self.fc1 = nn.Linear(262144, 128) # it is 64....\n",
    "        self.fc2 = nn.Linear(128, 10)\n",
    "        self.dropout = torch.nn.Dropout(p=0.5)\n",
    "        self.relu = torch.nn.ReLU()\n",
    "        \n",
    "    def forward(self, x):\n",
    "        x = self.conv1(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.pool(x)\n",
    "        x = self.conv2(x) \n",
    "        x = self.relu(x)\n",
    "        x = self.pool(x) \n",
    "        x = x.reshape(x.size(0), -1) \n",
    "        x = self.fc1(x) \n",
    "        x = self.dropout(x)\n",
    "        return self.fc2(x) \n",
    "    \n",
    "net = Net()\n",
    "net.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 选择优化器\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.SGD(net.parameters(), lr = 0.001, momentum=0.9)\n",
    "\n",
    "epoch_size = 1000\n",
    "for epoch in range(10):\n",
    "    running_loss = 0.0\n",
    "    \n",
    "    for i, data in enumerate(train_loader):\n",
    "        try:\n",
    "            inputs, labels = data\n",
    "            inputs, labels = inputs.to(device), labels.to(device)\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "\n",
    "            outputs = net(inputs)\n",
    "            loss = criterion(outputs, labels)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            running_loss += loss.item()\n",
    "            if i % epoch_size == epoch_size - 1:\n",
    "                print(f\"[{epoch + 1}, {i + 1}] loss: {round(running_loss / epoch_size, 4)}\")\n",
    "                running_loss = 0.0\n",
    "        except ValueError:\n",
    "            continue\n",
    "print(\"Finished\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Modified Resnet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 选择优化器\n",
    "class MyResNet(nn.Module):\n",
    "\n",
    "    def __init__(self, in_channels=1):\n",
    "        super(MyResNet, self).__init__()\n",
    "\n",
    "        # bring resnet\n",
    "        self.model = torchvision.models.resnet18()\n",
    "\n",
    "        # original definition of the first layer on the renset class\n",
    "        # self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n",
    "        \n",
    "        # your case\n",
    "        self.model.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.model(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "net = MyResNet()\n",
    "net.to(device)\n",
    "\n",
    "train_loader = utils.data.DataLoader(CT(skip_valid=True), batch_size = 10, shuffle = False, num_workers = 5)\n",
    "\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.SGD(net.parameters(), lr = 0.001, momentum=0.9)\n",
    "\n",
    "epoch_size = 1000\n",
    "for epoch in range(10):\n",
    "    running_loss = 0.0\n",
    "    \n",
    "    for i, data in enumerate(train_loader):\n",
    "        inputs, labels = data\n",
    "        inputs, labels = inputs.to(device), labels.to(device)\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        outputs = net(inputs)\n",
    "        loss = criterion(outputs, labels)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        running_loss += loss.item()\n",
    "        if i % epoch_size == epoch_size - 1:\n",
    "            print(f\"[{epoch + 1}, {i + 1}] loss: {round(running_loss / epoch_size, 4)}\")\n",
    "            running_loss = 0.0\n",
    "\n",
    "torch.save(net.state_dict(), \"myresnet\")\n",
    "print(\"Finished\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "MyResNet(\n",
       "  (model): ResNet(\n",
       "    (conv1): Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n",
       "    (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (relu): ReLU(inplace=True)\n",
       "    (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
       "    (layer1): Sequential(\n",
       "      (0): BasicBlock(\n",
       "        (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (1): BasicBlock(\n",
       "        (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (layer2): Sequential(\n",
       "      (0): BasicBlock(\n",
       "        (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "          (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): BasicBlock(\n",
       "        (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (layer3): Sequential(\n",
       "      (0): BasicBlock(\n",
       "        (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "          (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): BasicBlock(\n",
       "        (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (layer4): Sequential(\n",
       "      (0): BasicBlock(\n",
       "        (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "          (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): BasicBlock(\n",
       "        (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))\n",
       "    (fc): Linear(in_features=512, out_features=1000, bias=True)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "net = MyResNet()\n",
    "net.load_state_dict(torch.load(\"myresnet\"))\n",
    "net.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_loader = utils.data.DataLoader(\n",
    "    CT(LUAD=\"../data/Lung_CT/CPTAC-LUAD/\", skip_valid=True), \n",
    "    batch_size = 10,\n",
    "    shuffle = True,\n",
    "    num_workers = 5\n",
    ") "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #000080; font-weight: bold\">100.0</span>\n",
       "</pre>\n"
      ],
      "text/plain": [
       "<rich.jupyter.JupyterRenderable at 0x7faf70403940>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #000080; font-weight: bold\">100.0</span>\n",
       "</pre>\n"
      ],
      "text/plain": [
       "<rich.jupyter.JupyterRenderable at 0x7faf70403880>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #000080; font-weight: bold\">100.0</span>\n",
       "</pre>\n"
      ],
      "text/plain": [
       "<rich.jupyter.JupyterRenderable at 0x7faf7031a550>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #000080; font-weight: bold\">100.0</span>\n",
       "</pre>\n"
      ],
      "text/plain": [
       "<rich.jupyter.JupyterRenderable at 0x7faf7031a550>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #000080; font-weight: bold\">100.0</span>\n",
       "</pre>\n"
      ],
      "text/plain": [
       "<rich.jupyter.JupyterRenderable at 0x7faf7031a550>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #000080; font-weight: bold\">100.0</span>\n",
       "</pre>\n"
      ],
      "text/plain": [
       "<rich.jupyter.JupyterRenderable at 0x7faf70403640>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #000080; font-weight: bold\">100.0</span>\n",
       "</pre>\n"
      ],
      "text/plain": [
       "<rich.jupyter.JupyterRenderable at 0x7faf70402550>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "acc = []\n",
    "for i, data in enumerate(test_loader):\n",
    "    inputs, labels\n",
    "    inputs.to(device)\n",
    "    labels.to(labels)\n",
    "    \n",
    "    out = net(inputs)\n",
    "    _, prediction = torch.max(out, 1)  # 按行取最大值\n",
    "    acc.append(sum(prediction == labels).numpy().sum() / len(labels) * 100)\n",
    "    \n",
    "    if i % 100 == 0:\n",
    "        print(np.mean(acc))\n",
    "        \n",
    "    if i > 500:\n",
    "        break\n",
    "\n",
    "print(np.mean(acc))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0, 1, 0, 0, 1, 0, 0, 1, 0, 0])"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
