{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "d:\\Anaconda install\\envs\\pytorch\\lib\\site-packages\\tqdm\\auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from torchvision import datasets\n",
    "from matplotlib import pyplot as plt\n",
    "import torch.nn as nn\n",
    "from torchvision import transforms\n",
    "import torch.optim as optim"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 正则化\n",
    "\n",
    "1. 检查系数：权重惩罚\n",
    "\n",
    "在损失中添加一个正则化项：它是模型中所有权重的平方和\n",
    "\n",
    "目的主要就是为了，避免模型过度复杂从而导致过拟合现象\n",
    "\n",
    "在PyTorch中的SGD优化器已经有一个weight_decay参数，等价于已经加入上述思想"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Dropout\n",
    "\n",
    "Dropout：A Simple Way to Prevent Neural Networks from Overfitting\n",
    "\n",
    "思想：将网络每轮训练迭代中的神经元随机部分清零。\n",
    "\n",
    "\n",
    "\n",
    "在具体操作中，通过在非线性激活与后面的线性层或卷积模块之间添加一个nn.Dropout模块来实现。同时需要指定输入归零的概率。\n",
    "\n",
    "如果是卷积需要使用专门的nn.Dropout2d 或nn.Dropout3d。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [],
   "source": [
    "class NetDropout(nn.Module):\n",
    "    \"\"\"Convolutional net for 10-class classification, regularized with Dropout2d.\n",
    "\n",
    "    The fully connected head assumes 3x32x32 inputs (two 2x2 max-pools\n",
    "    reduce 32 -> 16 -> 8) -- presumably CIFAR-10; confirm against the loader.\n",
    "    \"\"\"\n",
    "    def __init__(self,n_chans1):\n",
    "        super().__init__()\n",
    "        self.n_chans1=n_chans1\n",
    "        self.conv1 = nn.Conv2d(3,n_chans1,kernel_size=3,padding=1) # conv: 3 -> n_chans1 channels, padding=1 keeps spatial size\n",
    "        self.pool1 = nn.MaxPool2d(2)# 2x2 max-pool: halves spatial size\n",
    "        self.conv1_dropout = nn.Dropout2d(p=0.4)# channel-wise dropout: zeroes whole feature maps with prob 0.4\n",
    "\n",
    "        self.conv2 = nn.Conv2d(n_chans1,n_chans1//2,kernel_size=3,padding=1)\n",
    "        self.pool2 = nn.MaxPool2d(2)\n",
    "        self.conv2_dropout = nn.Dropout2d(p=0.4)\n",
    "\n",
    "        self.fc1 = nn.Linear(8*8*n_chans1//2,32)# 8x8 spatial size left after two pools on a 32x32 input\n",
    "        self.fc2 = nn.Linear(32,10)# 10-way classification head\n",
    "\n",
    "\n",
    "    def forward(self,x):\n",
    "        out = torch.tanh(self.conv1(x))  # conv + tanh activation\n",
    "        out = self.pool1(out) # pool\n",
    "        out = self.conv1_dropout(out)# dropout (active only in training mode)\n",
    "\n",
    "        out = torch.tanh(self.conv2(out))\n",
    "        out = self.pool2(out)\n",
    "        out = self.conv2_dropout(out)\n",
    "        out = out.view(-1,8*8*self.n_chans1//2)# flatten for the fully connected layers\n",
    "\n",
    "        out = torch.tanh(self.fc1(out))# fully connected layer\n",
    "        out = self.fc2(out)# raw logits (no softmax; pair with CrossEntropyLoss)\n",
    "    \n",
    "        return out"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 保持激活检查：批量归一化\n",
    "\n",
    "另一种替代Dropout的方法\n",
    "\n",
    "允许我们提高学习率，减少训练对初始化的依赖，并充当正则化器，提出了一种替代Dropout的方法\n",
    "\n",
    "思想：将输入重新调整到网络的激活状态，从而使小批量具有一定的理想分布\n",
    "\n",
    "Pytorch提供了nn.BatchNorm1d、nn.BatchNorm2d和nn.BatchNorm3d实现批量归一化\n",
    "\n",
    "- 在具体操作中，置于线性变换（或卷积）之后、激活函数之前"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "class NetBatchNorm(nn.Module):\n",
    "    \"\"\"Convolutional net for 10-class classification, regularized with BatchNorm2d.\n",
    "\n",
    "    Batch norm is applied after each convolution and before the tanh\n",
    "    activation. The head assumes 3x32x32 inputs (two 2x2 max-pools\n",
    "    reduce 32 -> 16 -> 8) -- presumably CIFAR-10; confirm against the loader.\n",
    "    \"\"\"\n",
    "    def __init__(self,n_chans1):\n",
    "        super().__init__()\n",
    "        self.n_chans1=n_chans1\n",
    "        self.conv1 = nn.Conv2d(3,n_chans1,kernel_size=3,padding=1) # conv: 3 -> n_chans1 channels\n",
    "        self.conv1_batchnorm = nn.BatchNorm2d(num_features=n_chans1)# normalizes the conv output so activations stay well-scaled\n",
    "        self.pool1 = nn.MaxPool2d(2)# 2x2 max-pool\n",
    "\n",
    "\n",
    "        self.conv2 = nn.Conv2d(n_chans1,n_chans1//2,kernel_size=3,padding=1)\n",
    "        self.conv2_batchnorm = nn.BatchNorm2d(num_features=n_chans1//2)# batch norm for the second conv stage\n",
    "        self.pool2 = nn.MaxPool2d(2)\n",
    "\n",
    "\n",
    "        self.fc1 = nn.Linear(8*8*n_chans1//2,32)# 8x8 spatial size left after two pools on a 32x32 input\n",
    "        self.fc2 = nn.Linear(32,10)# 10-way classification head\n",
    "\n",
    "\n",
    "    def forward(self,x):\n",
    "        out = self.conv1(x) # conv\n",
    "        out = self.conv1_batchnorm(out) # batch norm (placed before the activation)\n",
    "        out = torch.tanh(out) # activation\n",
    "        out = self.pool1(out) # pool\n",
    "\n",
    "        out = self.conv2(out)# conv\n",
    "        out = self.conv2_batchnorm(out) # batch norm\n",
    "        out = torch.tanh(out) # activation\n",
    "        out = self.pool2(out) # pool\n",
    "\n",
    "        out = out.view(-1,8*8*self.n_chans1//2)# flatten\n",
    "        out = self.fc1(out)# fully connected\n",
    "        out = torch.tanh(out) # activation\n",
    "        out = self.fc2(out) # raw logits\n",
    "    \n",
    "        return out"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 更深层的结构\n",
    "\n",
    "Resnet残差连接模块\n",
    "\n",
    "![](imgsResorce/Resnet.jpg)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
    "class NetRes(nn.Module):\n",
    "    def __init__(self,n_chans1=32):\n",
    "        super().__init__()\n",
    "        ## 定义网络模块\n",
    "        self.n_chans1 = n_chans1\n",
    "        self.conv1 = nn.Conv2d(3,n_chans1,kernel_size=3,padding=1)\n",
    "\n",
    "        self.pool1 = nn.MaxPool2d(2)\n",
    "        self.conv2 = nn.Conv2d(n_chans1,n_chans1//2,kernel_size=3,padding=1)\n",
    "        self.pool2 = nn.MaxPool2d(2)\n",
    "\n",
    "        self.conv3 = nn.Conv2d(n_chans1//2,n_chans1//2,kernel_size=3,padding=1)\n",
    "        self.pool3 = nn.MaxPool2d(2)\n",
    "\n",
    "        self.fc1 = nn.Linear(4*4*n_chans1,32)\n",
    "        self.fc2 = nn.Linear(32,10)#10分类\n",
    "\n",
    "\n",
    "\n",
    "    def forward(self,x):\n",
    "        out = torch.relu(self.conv1(x))  #卷积+激活\n",
    "        out = self.pool1(out) #池化\n",
    "\n",
    "        out = torch.relu(self.conv2(out)) #第二次卷积+激活\n",
    "        out = self.pool2(out)#池化\n",
    "\n",
    "        out1 = out  #先把卷积两次后的结果暂存\n",
    "\n",
    "        out = torch.relu(self.conv3(out)) \n",
    "        #短接\n",
    "        out = self.pool3(out + out1) #若out=0 ，相当于直接短接\n",
    "\n",
    "        out = out.view(-1,4*4*self.n_chans1//2)\n",
    "\n",
    "        ## 全连接层\n",
    "        out = torch.relu(self.fc1(out)) \n",
    "        out = self.fc2(out)\n",
    "\n",
    "        return out "
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 利用PyTorch中的残差块构建残差网络\n",
    "\n",
    "![](imgsResorce/ResNetBlock.jpg)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Define the residual block\n",
    "class ResBlock(nn.Module):\n",
    "    \"\"\"One residual block: channel-preserving 3x3 conv + BN + ReLU + skip.\n",
    "\n",
    "    NOTE(review): `n_blocks` is accepted but never used here -- the block\n",
    "    count belongs to the enclosing network; consider removing the parameter.\n",
    "    \"\"\"\n",
    "    def __init__(self,n_chans=32,n_blocks=10):\n",
    "        super().__init__()\n",
    "        self.conv = nn.Conv2d(n_chans,n_chans,kernel_size=3,padding=1,bias=False) # bias omitted: the following batch-norm layer would cancel it out\n",
    "        self.batch_norm = nn.BatchNorm2d(num_features=n_chans)\n",
    "        nn.init.kaiming_normal_(self.conv.weight,nonlinearity='relu')\n",
    "        nn.init.constant_(self.batch_norm.weight,0.5)\n",
    "        torch.nn.init.zeros_(self.batch_norm.bias) # custom init: kaiming_normal_ draws normally distributed weights with the std from the ResNet paper; BN starts with zero-mean output and scale 0.5\n",
    "\n",
    "    def forward(self,x):\n",
    "        out = self.conv(x)\n",
    "        out = self.batch_norm(out)\n",
    "        out = torch.relu(out) \n",
    "        return out + x\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Build a deeper network from a stack of residual blocks (default: 10)\n",
    "\n",
    "class NetResDeep(nn.Module):\n",
    "    \"\"\"Conv stem + a stack of n_blocks ResBlocks + 2-layer classifier head.\n",
    "\n",
    "    Assumes 3x32x32 inputs: the two 2x2 max-pools yield the 8x8 maps that\n",
    "    the first fully connected layer expects.\n",
    "    \"\"\"\n",
    "    def __init__(self,n_chans1=32,n_blocks=10):\n",
    "        super().__init__()\n",
    "        # channel count produced by the stem and preserved by every ResBlock\n",
    "        self.n_chans1 = n_chans1\n",
    "        # stem convolution: 3 -> n_chans1 channels\n",
    "        self.conv1 = nn.Conv2d(3,n_chans1,kernel_size=3,padding=1)\n",
    "\n",
    "        # 2x2 max-pool (parameter-free, so it is safe to reuse twice in forward)\n",
    "        self.pool = nn.MaxPool2d(2)\n",
    "\n",
    "        ## residual stack\n",
    "        # BUG FIX: the original used n_blocks*[ResBlock(...)], which repeats\n",
    "        # the SAME block instance n_blocks times, so every \"block\" shared one\n",
    "        # set of weights. Build n_blocks independent blocks instead.\n",
    "        self.resblocks = nn.Sequential(\n",
    "            *[ResBlock(n_chans=n_chans1) for _ in range(n_blocks)])\n",
    "        \n",
    "        # classifier head\n",
    "        self.fc1 = nn.Linear(8*8*n_chans1,32)\n",
    "        self.fc2 = nn.Linear(32,10)\n",
    "\n",
    "    def forward(self,x):\n",
    "        out = torch.relu(self.conv1(x)) # stem conv + activation\n",
    "        out = self.pool(out) # pool: 32 -> 16\n",
    "        \n",
    "        # residual stack (size- and channel-preserving)\n",
    "        out = self.resblocks(out)\n",
    "\n",
    "        out = self.pool(out)# pool: 16 -> 8\n",
    "\n",
    "        # flatten\n",
    "        out = out.view(-1,8*8*self.n_chans1)\n",
    "\n",
    "        out = torch.relu(self.fc1(out))  # fully connected\n",
    "        out = self.fc2(out) # raw logits\n",
    "        \n",
    "        return out\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- 下面利用上述10个残差块的残差神经网络进行10分类实验"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "import datetime"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "## Load CIFAR-10 and normalize with statistics computed on the training set\n",
    "\n",
    "data_path=r'../Data'\n",
    "# Raw training data (ToTensor only), used to estimate per-channel statistics\n",
    "cifar10=datasets.CIFAR10(data_path,train=True,download=True,transform=transforms.ToTensor())\n",
    "\n",
    "imgs = torch.stack([img_t for img_t,_ in cifar10],dim=3)\n",
    "mean = imgs.view(3,-1).mean(dim=1) # per-channel mean over every pixel of the training set\n",
    "std = imgs.view(3,-1).std(dim=1)   # per-channel std over every pixel of the training set\n",
    "\n",
    "normalize = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=mean,std=std)\n",
    "])\n",
    "\n",
    "# Training set, normalized with the training statistics\n",
    "transformed_cifar10=datasets.CIFAR10(data_path,train=True,download=True,transform=normalize)\n",
    "\n",
    "# BUG FIX: the validation set must reuse the TRAINING mean/std. The original\n",
    "# recomputed the statistics from the already-normalized training tensors\n",
    "# (mean ~0, std ~1), which left the validation images effectively\n",
    "# unnormalized and inconsistent with the training distribution.\n",
    "transformed_cifar10_val=datasets.CIFAR10(data_path,train=False,download=True,transform=normalize)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Training loop\n",
    "def training_loop(n_epochs,optimizer,model,loss_fn,train_loader):\n",
    "    \"\"\"Train `model` on `train_loader` for `n_epochs` epochs (CUDA required).\n",
    "\n",
    "    Prints the mean per-batch training loss at epoch 1 and every 10th epoch.\n",
    "    \"\"\"\n",
    "    # FIX: move the model to the GPU once, before the loops -- the original\n",
    "    # called model.to() on every batch. nn.Module.to() moves parameters\n",
    "    # in-place, so the optimizer built from model.parameters() stays valid.\n",
    "    model = model.to(device='cuda')\n",
    "    model.train()  # ensure Dropout/BatchNorm layers are in training mode\n",
    "    for epoch in range(1,n_epochs+1):\n",
    "        loss_train=0.0\n",
    "        for imgs,labels in train_loader:\n",
    "            imgs = imgs.to(device='cuda')\n",
    "            labels = labels.to(device='cuda')\n",
    "            outputs = model(imgs)\n",
    "            loss = loss_fn(outputs,labels)\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            loss_train += loss.item()\n",
    "        if epoch == 1 or epoch % 10 == 0:\n",
    "            print(\"{} Epoch:{}, TrainingLoss:{}\".format(datetime.datetime.now(),epoch,loss_train/len(train_loader)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2023-03-04 19:08:18.363538 Epoch:1, TrainingLoss:0.008276891861217515\n",
      "2023-03-04 19:10:38.451933 Epoch:10, TrainingLoss:0.006333022373509617\n",
      "2023-03-04 19:13:13.488253 Epoch:20, TrainingLoss:0.04498052944475666\n",
      "2023-03-04 19:15:51.053470 Epoch:30, TrainingLoss:0.00570550036538914\n",
      "2023-03-04 19:18:27.372421 Epoch:40, TrainingLoss:0.005028797319302421\n",
      "2023-03-04 19:21:02.983205 Epoch:50, TrainingLoss:0.0032720737713936936\n",
      "2023-03-04 19:23:38.498245 Epoch:60, TrainingLoss:0.018493834596625684\n",
      "2023-03-04 19:26:14.293587 Epoch:70, TrainingLoss:0.002907948246460332\n",
      "2023-03-04 19:28:49.881470 Epoch:80, TrainingLoss:0.0033643934364734895\n",
      "2023-03-04 19:31:25.416460 Epoch:90, TrainingLoss:0.0028838686681652674\n",
      "2023-03-04 19:34:01.131090 Epoch:100, TrainingLoss:0.00176444868535197\n"
     ]
    }
   ],
   "source": [
    "train_loader = torch.utils.data.DataLoader(transformed_cifar10,batch_size=64,shuffle=True) # data loader, shuffled for training\n",
    "model = NetResDeep() # build the residual network (defaults: 32 channels, 10 blocks) \n",
    "model.load_state_dict(torch.load(\"cifar10_ResNet100.pt\")) # resume from the previously saved checkpoint\n",
    "optimizer = optim.SGD(model.parameters(),lr=3e-3) # optimizer\n",
    "loss_fn = nn.CrossEntropyLoss() # loss function (expects raw logits)\n",
    "\n",
    "\n",
    "training_loop(\n",
    "    n_epochs=100,\n",
    "    optimizer=optimizer,\n",
    "    model=model,\n",
    "    loss_fn=loss_fn,\n",
    "    train_loader=train_loader\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy train:99.99%\n",
      "Accuracy val:46.01%\n"
     ]
    }
   ],
   "source": [
    "## Evaluate accuracy on the training and validation sets\n",
    "train_loader = torch.utils.data.DataLoader(transformed_cifar10,batch_size=64,shuffle=False)\n",
    "val_loader = torch.utils.data.DataLoader(transformed_cifar10_val,batch_size=64,shuffle=False)\n",
    "\n",
    "def validate(model, train_loader, val_loader):\n",
    "    \"\"\"Print top-1 accuracy of `model` on both loaders (CUDA required).\"\"\"\n",
    "    # Move the model once instead of once per batch.\n",
    "    model = model.to(device='cuda')\n",
    "    # BUG FIX: switch to eval mode so Dropout is disabled and BatchNorm uses\n",
    "    # its running statistics; otherwise accuracy depends on batch composition.\n",
    "    model.eval()\n",
    "    for name, loader in [(\"train\", train_loader), (\"val\", val_loader)]:\n",
    "        correct = 0\n",
    "        total = 0\n",
    "        with torch.no_grad():  # no gradients needed during evaluation\n",
    "            for imgs,labels in loader:\n",
    "                imgs=imgs.to(device='cuda')\n",
    "                labels = labels.to(device='cuda')\n",
    "                outputs = model(imgs)\n",
    "                _,predicted = torch.max(outputs,dim=1)  # class with the highest logit\n",
    "                total += labels.shape[0]\n",
    "                correct += int((predicted==labels).sum())\n",
    "            print(\"Accuracy {}:{:.2f}\".format(name,correct*100/total)+\"%\")\n",
    "\n",
    "\n",
    "validate(model,train_loader,val_loader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Save the trained weights (state_dict only, not the whole module)\n",
    "torch.save(model.state_dict(),\"cifar10_ResNet100.pt\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "e8f28fbeb112014ba6c333544755f206e5d677c1e329d1a6cce6c1b79d7517a3"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
