{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using cache found in C:\\Users\\Administrator.DESKTOP-HN1J6IE/.cache\\torch\\hub\\pytorch_vision_v0.4.2\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 1000])\n",
      "tensor(404)\n",
      "-------------------------------\n",
      "torch.Size([1, 1000])\n",
      "tensor(244)\n",
      "-------------------------------\n",
      "torch.Size([1, 1000])\n",
      "tensor(99)\n",
      "-------------------------------\n",
      "torch.Size([1, 1000])\n",
      "tensor(281)\n",
      "-------------------------------\n",
      "torch.Size([1, 1000])\n",
      "tensor(288)\n",
      "-------------------------------\n",
      "torch.Size([1, 1000])\n",
      "tensor(147)\n",
      "-------------------------------\n",
      "torch.Size([1, 1000])\n",
      "tensor(602)\n",
      "-------------------------------\n",
      "torch.Size([1, 1000])\n",
      "tensor(101)\n",
      "-------------------------------\n",
      "torch.Size([1, 1000])\n",
      "tensor(46)\n",
      "-------------------------------\n",
      "torch.Size([1, 1000])\n",
      "tensor(366)\n",
      "-------------------------------\n"
     ]
    }
   ],
   "source": [
     "import torch\n",
     "import torch.nn as nn\n",
     "from torch.utils.model_zoo import load_url as load_state_dict_from_url\n",
     "\n",
     "# Public names exported by this (notebook-inlined) VGG module.\n",
     "__all__ = [\n",
     "    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',\n",
     "    'vgg19_bn', 'vgg19',\n",
     "]\n",
     "\n",
     "# Download URLs for ImageNet-pretrained weights, keyed by architecture name.\n",
     "model_urls = {\n",
     "    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',\n",
     "    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',\n",
     "    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',\n",
     "    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',\n",
     "    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',\n",
     "    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',\n",
     "    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',\n",
     "    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',\n",
     "}\n",
    "\n",
    "class VGG(nn.Module):\n",
    "\n",
    "    def __init__(self, features, num_classes=1000, init_weights=True):   #只是定义参数，没有执行  参数1000 \n",
    "        super(VGG, self).__init__()    #调用父类的初始函数   \n",
    "        self.features = features\n",
    "        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))  #avgpolling全局平均池化 卷积是二维 和全连接层是一维 的对接  （即每个特征图变成一个数）\n",
    "        self.classifier = nn.Sequential(    #Sequential结构\n",
    "            nn.Linear(512 * 7 * 7, 4096),   #结构和最后的图（后面是分类）一模一样\n",
    "            nn.ReLU(True),      \n",
    "            nn.Dropout(),  #正则化，用在全连接层，让参数更少点\n",
    "            nn.ReLU(True),\n",
    "            nn.Dropout(),\n",
    "            nn.Linear(4096, num_classes),\n",
    "        )\n",
    "        if init_weights:\n",
    "            self._initialize_weights()\n",
    "\n",
    "    def forward(self, x):  #真正执行的时候\n",
    "        x = self.features(x) #特征提取\n",
    "        x = self.avgpool(x)   #平均池化\n",
    "        x = torch.flatten(x, 1) #还是使用 pattern，变成一维\n",
    "        x = self.classifier(x)  #分类\n",
    "        return x\n",
    "\n",
    "    def _initialize_weights(self):   #不同层不同的初始化\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, nn.Conv2d):  #卷积层 的初始化\n",
    "                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') #用reu激活函数，则用kaiming初始化\n",
    "                if m.bias is not None:  #一般偏执设置为0\n",
    "                    nn.init.constant_(m.bias, 0)\n",
    "            elif isinstance(m, nn.BatchNorm2d): #BN层 ，初始化权重为1 \n",
    "                nn.init.constant_(m.weight, 1)\n",
    "                nn.init.constant_(m.bias, 0)\n",
    "            elif isinstance(m, nn.Linear):     #全连接层的初始化\n",
    "                nn.init.normal_(m.weight, 0, 0.01)\n",
    "                nn.init.constant_(m.bias, 0)\n",
    "\n",
    "\n",
    "def make_layers(cfg, batch_norm=False):    #生成层\n",
    "    layers = []\n",
    "    in_channels = 3\n",
    "    for v in cfg:\n",
    "        if v == 'M':\n",
    "            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]   //池化层 \n",
    "        else: #卷积\n",
    "            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) #channels\n",
    "            if batch_norm:\n",
    "                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] #若是batch normalization加入nb和激活函数\n",
    "            else:\n",
    "                layers += [conv2d, nn.ReLU(inplace=True)]\n",
    "            in_channels = v\n",
    "    return nn.Sequential(*layers)   #最后返回一个上面的结构，更容易看  *layers 是一个一个的遍历\n",
    "\n",
    "\n",
     "cfgs = {  # ints are conv out-channel counts; 'M' marks a max-pool layer\n",
     "    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n",
     "    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n",
     "    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n",
     "    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n",
     "}\n",
    "\n",
    "\n",
    "def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):\n",
    "    if pretrained:\n",
    "        kwargs['init_weights'] = False\n",
    "    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)#用make_layers生成层\n",
    "    if pretrained:\n",
    "        print(1)\n",
    "        state_dict = load_state_dict_from_url(model_urls[arch],    #根据url加载模型\n",
    "                                              progress=progress)\n",
    "        print(2)\n",
    "        model.load_state_dict(state_dict)\n",
    "        print(3)\n",
    "    return model\n",
    "\n",
    "\n",
    "def vgg11(pretrained=False, progress=True, **kwargs):\n",
    "    r\"\"\"VGG 11-layer model (configuration \"A\") from\n",
    "    `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>`_\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model pre-trained on ImageNet\n",
    "        progress (bool): If True, displays a progress bar of the download to stderr\n",
    "    \"\"\"\n",
    "    return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)\n",
    "\n",
    "\n",
    "import torch\n",
    "model2 = torch.hub.load('pytorch/vision:v0.4.2', 'vgg11', pretrained=True)\n",
    "model2.eval()\n",
    "# model2 = vgg11(pretrained=True)\n",
    "# model2.eval()\n",
    "model2.state_dict()   #state_dict只包含卷积层和全连接层的参数 +batchnorm's running_mean\n",
    "\n",
    "\n",
    "# sample execution (requires torchvision)\n",
    "from PIL import Image\n",
    "from torchvision import transforms\n",
    "imgs=[\"timg.jpg\",'dog.jpg','niao.jpg','cat.jpg','bao.jpg','jing.jpg','person.jpg','xiang.jpg','xiyi.jpg','yuan.jpg']\n",
    "for img in imgs:\n",
    "    input_image = Image.open(img)\n",
    "\n",
    "    preprocess = transforms.Compose([\n",
    "        transforms.Resize(256),\n",
    "        transforms.CenterCrop(224),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n",
    "    ])\n",
    "    input_tensor = preprocess(input_image)\n",
    "    input_batch = input_tensor.unsqueeze(0) # https://www.cnblogs.com/Archer-Fang/p/10647986.html 扩大容量\n",
    "\n",
    "    # move the input and model to GPU for speed if available\n",
    "    if torch.cuda.is_available():\n",
    "        input_batch = input_batch.to('cuda')\n",
    "        model2.to('cuda')\n",
    "\n",
    "    with torch.no_grad():\n",
    "        output = model2(input_batch)  #根据模型放入vgg中加载成一个实体模型，输入自己的案例获得想要的模型结果\n",
    "    # Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n",
    "    print(output.shape)\n",
    "    # The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n",
    "    # print(torch.nn.functional.softmax(output[0], dim=0))\n",
    "\n",
    "    # result = torch.nn.functional.softmax(output[0], dim=0)\n",
    "\n",
    "    # print(result.argmax())\n",
    "    import numpy as np\n",
    "    print(np.argmax(output))\n",
    "    input_image\n",
    "    print('-------------------------------')\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
