{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# [kaggle][学习向]sf-crime数据的多层神经网络（其二）\n",
    "\n",
    "在上一篇的结尾……"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import torch\n",
    "from torch import nn\n",
    "from sklearn.model_selection import KFold\n",
    "\n",
    "# Pre-processed features/labels produced by the previous notebook.\n",
    "train_features = np.load('./data/train_features.npy')\n",
    "train_labels = np.load('./data/train_labels_onehot.npy')\n",
    "test_features = np.load('./data/test_features.npy')\n",
    "\n",
    "num_inputs = 21   # number of engineered feature columns\n",
    "num_outputs = 39  # number of crime categories (one-hot width)\n",
    "\n",
    "class sf_crime():\n",
    "    \"\"\"Training driver for the sf-crime models.\n",
    "\n",
    "    NOTE(review): relies on the module-level globals `net`, `loss`,\n",
    "    `optimizer`, `device`, `train_features` and `train_labels`; each\n",
    "    model cell below re-binds `net`/`optimizer` before constructing a\n",
    "    new instance, and training starts as a side effect of __init__.\n",
    "    \"\"\"\n",
    "    def __init__(self, num_epochs, k_fold_num, batch_size, k_fold):\n",
    "        \"\"\"Store the run configuration and immediately start training.\n",
    "\n",
    "        num_epochs  -- passes over the data (or over all folds)\n",
    "        k_fold_num  -- number of folds when k_fold is True\n",
    "        batch_size  -- mini-batch size for the DataLoader\n",
    "        k_fold      -- True: k-fold cross-validation; False: train on all data\n",
    "        \"\"\"\n",
    "        self.num_epochs = num_epochs\n",
    "        self.k_fold_num = k_fold_num\n",
    "        self.batch_size = batch_size\n",
    "        self.k_fold = k_fold\n",
    "        self.run()  # training starts here, inside the constructor\n",
    "\n",
    "    def make_iter(self, train_features, train_labels):\n",
    "        \"\"\"Wrap numpy arrays into a shuffled DataLoader on `device`.\"\"\"\n",
    "        train_features = torch.tensor(train_features, dtype=torch.float).to(device)\n",
    "        train_labels = torch.tensor(train_labels).to(device)\n",
    "        dataset = torch.utils.data.TensorDataset(train_features, train_labels)\n",
    "        return torch.utils.data.DataLoader(dataset, self.batch_size, shuffle=True)\n",
    "\n",
    "    def show_loss(self, features, labels, team):\n",
    "        \"\"\"Print the mean per-batch loss of `net` over the given data.\n",
    "\n",
    "        `team` is the label printed before the loss value.\n",
    "        \"\"\"\n",
    "        net.eval()  # disable dropout / use BatchNorm running statistics\n",
    "        batch = self.make_iter(features, labels)\n",
    "        loss_num = 0\n",
    "        n = 0\n",
    "        for x, y in batch:\n",
    "            loss_num += loss(net(x), y).sum().item()\n",
    "            n += 1\n",
    "        # NOTE(review): this averages per-batch losses, so a smaller final\n",
    "        # batch gets the same weight as a full one (small bias).\n",
    "        print(team, end=' ')\n",
    "        print('loss:', loss_num / n)\n",
    "\n",
    "    def train(self, features, labels):\n",
    "        \"\"\"Run one optimization pass over the data, then report train loss.\"\"\"\n",
    "        net.train()\n",
    "        train_iter = self.make_iter(features, labels)\n",
    "        for X, y in train_iter:\n",
    "            y_hat = net(X)\n",
    "            l = loss(y_hat, y).sum()  # loss returns an unreduced tensor; sum -> scalar\n",
    "            optimizer.zero_grad()\n",
    "            l.backward()\n",
    "            optimizer.step()\n",
    "        self.show_loss(features, labels, '训练集')\n",
    "\n",
    "    def run(self):\n",
    "        \"\"\"Either k-fold cross-validate or train on the full training set.\"\"\"\n",
    "        if self.k_fold:\n",
    "            # NOTE(review): shuffle=True without random_state -> fold splits\n",
    "            # differ between runs; results are not exactly reproducible.\n",
    "            kf = KFold(n_splits=self.k_fold_num, shuffle=True)\n",
    "            for epoch in range(self.num_epochs):\n",
    "                fold_num = 0\n",
    "                for train_index, test_index in kf.split(train_features):\n",
    "                    X_train, X_test = train_features[train_index], train_features[\n",
    "                        test_index]\n",
    "                    y_train, y_test = train_labels[train_index], train_labels[\n",
    "                        test_index]\n",
    "                    print('第%d轮的第%d折：' % (epoch + 1, fold_num + 1))\n",
    "                    fold_num += 1\n",
    "                    self.train(X_train, y_train)\n",
    "                    self.show_loss(X_test, y_test, '测试集')\n",
    "        else:\n",
    "            for epoch in range(self.num_epochs):\n",
    "                print('第%d轮：' % (epoch + 1))\n",
    "                self.train(train_features, train_labels)\n",
    "\n",
    "    def write(self, version):\n",
    "        \"\"\"Predict on the test set and write a Kaggle submission CSV.\n",
    "\n",
    "        Also saves the whole `net` object next to it. Paths assume the\n",
    "        Kaggle directory layout (../input, ../working).\n",
    "        \"\"\"\n",
    "        net.eval()\n",
    "        test_iter = torch.utils.data.DataLoader(torch.tensor(test_features,\n",
    "                                                             dtype=torch.float).to(device),\n",
    "                                                1024,\n",
    "                                                shuffle=False)\n",
    "        testResult = [line for x in test_iter for line in net(x).cpu().detach().numpy()]\n",
    "        sampleSubmission = pd.read_csv('../input/sf-crime/sampleSubmission.csv.zip')\n",
    "        Result_pd = pd.DataFrame(testResult,\n",
    "                                 index=sampleSubmission.index,\n",
    "                                 columns=sampleSubmission.columns[1:])\n",
    "        Result_pd.to_csv('../working/sampleSubmission('+str(version)+').csv', index_label='Id')\n",
    "        torch.save(net, '../working/net('+str(version)+').pkl')\n",
    "        print('Finish!')\n",
    "        \n",
    "# Use the GPU when available; all tensors and modules are moved here.\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "class MultiClassLogLoss(torch.nn.Module):\n",
    "    \"\"\"Unreduced multi-class log loss.\n",
    "\n",
    "    forward() returns -y_true * log(y_pred + 1e-15) / batch_size as a\n",
    "    tensor; callers apply .sum() to obtain the scalar mean cross-entropy.\n",
    "    The 1e-15 epsilon guards against log(0) from the Softmax output.\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(MultiClassLogLoss, self).__init__()\n",
    "\n",
    "    def forward(self, y_pred, y_true):\n",
    "        return -(y_true *\n",
    "                 torch.log(y_pred.float() + 1.00000000e-15)) / y_true.shape[0]\n",
    "\n",
    "loss = MultiClassLogLoss().to(device)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## ADAM\n",
    "\n",
    "适度调大batch_size，加快训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第1轮的第1折：\n",
      "训练集 loss: 2.5714463850834988\n",
      "测试集 loss: 2.5684672702442515\n",
      "第1轮的第2折：\n",
      "训练集 loss: 2.576505300215074\n",
      "测试集 loss: 2.579311692631328\n",
      "第1轮的第3折：\n",
      "训练集 loss: 2.56927140642713\n",
      "测试集 loss: 2.5692834620709184\n",
      "第1轮的第4折：\n",
      "训练集 loss: 2.5726130095395177\n",
      "测试集 loss: 2.5766285732909515\n",
      "第1轮的第5折：\n",
      "训练集 loss: 2.5695439518748464\n",
      "测试集 loss: 2.5632351228407213\n",
      "第1轮的第6折：\n",
      "训练集 loss: 2.5716270606834573\n",
      "测试集 loss: 2.5772705244851277\n",
      "第2轮的第1折：\n",
      "训练集 loss: 2.5717541221138482\n",
      "测试集 loss: 2.5788582571736582\n",
      "第2轮的第2折：\n",
      "训练集 loss: 2.5699191717001106\n",
      "测试集 loss: 2.5719893612228075\n",
      "第2轮的第3折：\n",
      "训练集 loss: 2.5735102210011513\n",
      "测试集 loss: 2.5745553886973775\n",
      "第2轮的第4折：\n",
      "训练集 loss: 2.5786073544642307\n",
      "测试集 loss: 2.5801492787741283\n",
      "第2轮的第5折：\n",
      "训练集 loss: 2.57283088677413\n",
      "测试集 loss: 2.567786668564056\n",
      "第2轮的第6折：\n",
      "训练集 loss: 2.5682252960605223\n",
      "测试集 loss: 2.5647884648996633\n",
      "第3轮的第1折：\n",
      "训练集 loss: 2.5689177659841684\n",
      "测试集 loss: 2.5679092207155025\n",
      "第3轮的第2折：\n",
      "训练集 loss: 2.567440762553182\n",
      "测试集 loss: 2.569945113642232\n",
      "第3轮的第3折：\n",
      "训练集 loss: 2.572759691318432\n",
      "测试集 loss: 2.5698793734703864\n",
      "第3轮的第4折：\n",
      "训练集 loss: 2.577294387350549\n",
      "测试集 loss: 2.578237691959301\n",
      "第3轮的第5折：\n",
      "训练集 loss: 2.570175269266942\n",
      "测试集 loss: 2.5723287125567458\n",
      "第3轮的第6折：\n",
      "训练集 loss: 2.5706060166125533\n",
      "测试集 loss: 2.5746588857023864\n"
     ]
    }
   ],
   "source": [
    "class build_model(torch.nn.Module):\n",
    "    \"\"\"Baseline model: a single Linear layer followed by Softmax\n",
    "    (i.e. multinomial logistic regression).\n",
    "    \"\"\"\n",
    "    def __init__(self, num_inputs, num_outputs):\n",
    "        super(build_model, self).__init__()\n",
    "        self.net = torch.nn.Sequential()\n",
    "        self.net.add_module('Linear', nn.Linear(num_inputs, num_outputs))\n",
    "        self.net.add_module('Softmax', nn.Softmax(dim=-1))\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.net(x)\n",
    "\n",
    "net = build_model(num_inputs, num_outputs).to(device)\n",
    "\n",
    "optimizer = torch.optim.Adam(net.parameters(), lr = 0.1)\n",
    "\n",
    "# 3 epochs of 6-fold CV; training runs inside the sf_crime constructor.\n",
    "sf_crime_1 = sf_crime(num_epochs = 3, k_fold_num = 6, batch_size = 1024, k_fold = True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 隐藏层及激活函数\n",
    "\n",
    "降低lr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第1轮的第1折：\n",
      "训练集 loss: 2.560995357686823\n",
      "测试集 loss: 2.5636703234452467\n",
      "第1轮的第2折：\n",
      "训练集 loss: 2.560400128864742\n",
      "测试集 loss: 2.566296585789927\n",
      "第1轮的第3折：\n",
      "训练集 loss: 2.5435580500355965\n",
      "测试集 loss: 2.5449448682211497\n",
      "第1轮的第4折：\n",
      "训练集 loss: 2.536481381796457\n",
      "测试集 loss: 2.52774959177404\n",
      "第1轮的第5折：\n",
      "训练集 loss: 2.5378770478121884\n",
      "测试集 loss: 2.541401341244891\n",
      "第1轮的第6折：\n",
      "训练集 loss: 2.5385033090631444\n",
      "测试集 loss: 2.5405327306760777\n",
      "第2轮的第1折：\n",
      "训练集 loss: 2.5233555086842783\n",
      "测试集 loss: 2.521572156385942\n",
      "第2轮的第2折：\n",
      "训练集 loss: 2.5283460170239\n",
      "测试集 loss: 2.5291790261968865\n",
      "第2轮的第3折：\n",
      "训练集 loss: 2.529217211516587\n",
      "测试集 loss: 2.531825642485719\n",
      "第2轮的第4折：\n",
      "训练集 loss: 2.5321651495420014\n",
      "测试集 loss: 2.53450096903981\n",
      "第2轮的第5折：\n",
      "训练集 loss: 2.5248642624674975\n",
      "测试集 loss: 2.5277930823239414\n",
      "第2轮的第6折：\n",
      "训练集 loss: 2.525798779934436\n",
      "测试集 loss: 2.5294350827490533\n",
      "第3轮的第1折：\n",
      "训练集 loss: 2.5223946211221335\n",
      "测试集 loss: 2.5233165034047373\n",
      "第3轮的第2折：\n",
      "训练集 loss: 2.526981333752612\n",
      "测试集 loss: 2.52607898945575\n",
      "第3轮的第3折：\n",
      "训练集 loss: 2.526863006111625\n",
      "测试集 loss: 2.533160741512592\n",
      "第3轮的第4折：\n",
      "训练集 loss: 2.5271550465297032\n",
      "测试集 loss: 2.532358574700522\n",
      "第3轮的第5折：\n",
      "训练集 loss: 2.5443091789325636\n",
      "测试集 loss: 2.538275351891151\n",
      "第3轮的第6折：\n",
      "训练集 loss: 2.539229459695883\n",
      "测试集 loss: 2.540893953163307\n"
     ]
    }
   ],
   "source": [
    "class build_model(torch.nn.Module):\n",
    "    \"\"\"One hidden layer (256 units) with PReLU, then Linear+Softmax.\"\"\"\n",
    "    def __init__(self, num_inputs, num_outputs):\n",
    "        super(build_model, self).__init__()\n",
    "        self.net = torch.nn.Sequential()\n",
    "        self.net.add_module('Linear1', nn.Linear(num_inputs, 256))\n",
    "        self.net.add_module('PReLU1', nn.PReLU(256))  # one learnable slope per unit\n",
    "        self.net.add_module('Linear-out', nn.Linear(256, num_outputs))\n",
    "        self.net.add_module('Softmax', nn.Softmax(dim=-1))\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.net(x)\n",
    "\n",
    "net = build_model(num_inputs, num_outputs).to(device)\n",
    "\n",
    "optimizer = torch.optim.Adam(net.parameters(), lr = 0.05)  # lower lr than the baseline cell\n",
    "\n",
    "sf_crime_2 = sf_crime(num_epochs = 3, k_fold_num = 6, batch_size = 1024, k_fold = True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Dropout层及BN算法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第1轮的第1折：\n",
      "训练集 loss: 2.56638293999892\n",
      "测试集 loss: 2.562827185317353\n",
      "第1轮的第2折：\n",
      "训练集 loss: 2.558674452521584\n",
      "测试集 loss: 2.562333337076894\n",
      "第1轮的第3折：\n",
      "训练集 loss: 2.5553139136387752\n",
      "测试集 loss: 2.5554991201920942\n",
      "第1轮的第4折：\n",
      "训练集 loss: 2.5519849093643936\n",
      "测试集 loss: 2.553815353166807\n",
      "第1轮的第5折：\n",
      "训练集 loss: 2.559399174643563\n",
      "测试集 loss: 2.564164853596187\n",
      "第1轮的第6折：\n",
      "训练集 loss: 2.55796268236387\n",
      "测试集 loss: 2.555022451427433\n",
      "第2轮的第1折：\n",
      "训练集 loss: 2.555549265454699\n",
      "测试集 loss: 2.555476402069305\n",
      "第2轮的第2折：\n",
      "训练集 loss: 2.555449908930105\n",
      "测试集 loss: 2.5593174537578665\n",
      "第2轮的第3折：\n",
      "训练集 loss: 2.5539399793931654\n",
      "测试集 loss: 2.551660310971987\n",
      "第2轮的第4折：\n",
      "训练集 loss: 2.552227055609643\n",
      "测试集 loss: 2.5512160838067115\n",
      "第2轮的第5折：\n",
      "训练集 loss: 2.566546737230741\n",
      "测试集 loss: 2.5664986213604055\n",
      "第2轮的第6折：\n",
      "训练集 loss: 2.5491769980717374\n",
      "测试集 loss: 2.5525687074327803\n",
      "第3轮的第1折：\n",
      "训练集 loss: 2.5510908320233536\n",
      "测试集 loss: 2.5498190142891626\n",
      "第3轮的第2折：\n",
      "训练集 loss: 2.552627453437218\n",
      "测试集 loss: 2.5575146274966793\n",
      "第3轮的第3折：\n",
      "训练集 loss: 2.549942904585725\n",
      "测试集 loss: 2.548543086418739\n",
      "第3轮的第4折：\n",
      "训练集 loss: 2.5563663349284993\n",
      "测试集 loss: 2.5596473083629476\n",
      "第3轮的第5折：\n",
      "训练集 loss: 2.55066615851609\n",
      "测试集 loss: 2.5511944394011596\n",
      "第3轮的第6折：\n",
      "训练集 loss: 2.548675792867487\n",
      "测试集 loss: 2.545277478811624\n"
     ]
    }
   ],
   "source": [
    "class build_model(torch.nn.Module):\n",
    "    \"\"\"MLP with PReLU activations, BatchNorm and Dropout.\n",
    "\n",
    "    hn     -- hidden width of every hidden layer\n",
    "    dp     -- dropout probability\n",
    "    layers -- number of extra hidden blocks after the first one\n",
    "    \"\"\"\n",
    "    def __init__(self, num_inputs, num_outputs, hn=64, dp=0.5, layers=2):\n",
    "        super(build_model, self).__init__()\n",
    "        self.net = torch.nn.Sequential()\n",
    "        self.net.add_module('Linear1', nn.Linear(num_inputs, hn))\n",
    "        self.net.add_module('PReLU1', nn.PReLU(hn))\n",
    "        self.net.add_module('Dropout1', nn.Dropout(p=dp))\n",
    "        # Repeated hidden blocks: Linear -> PReLU -> BatchNorm -> Dropout.\n",
    "        for i in range(layers):\n",
    "            self.net.add_module('Linear' + str(i + 2), nn.Linear(hn, hn))\n",
    "            self.net.add_module('PReLU' + str(i + 2), nn.PReLU(hn))\n",
    "            self.net.add_module('BatchNorm' + str(i + 1), nn.BatchNorm1d(hn))\n",
    "            self.net.add_module('Dropout' + str(i + 2), nn.Dropout(p=dp))\n",
    "        self.net.add_module('Linear-out', nn.Linear(hn, num_outputs))\n",
    "        self.net.add_module('Softmax', nn.Softmax(dim=-1))\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.net(x)\n",
    "\n",
    "net = build_model(num_inputs, num_outputs).to(device)\n",
    "\n",
    "optimizer = torch.optim.Adam(net.parameters(), lr = 0.05)\n",
    "\n",
    "sf_crime_3 = sf_crime(num_epochs = 3, k_fold_num = 6, batch_size = 1024, k_fold = True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 残差\n",
    "\n",
    "降低lr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第1轮的第1折：\n",
      "训练集 loss: 2.5189488190871017\n",
      "测试集 loss: 2.5260313390851854\n",
      "第1轮的第2折：\n",
      "训练集 loss: 2.513177054078429\n",
      "测试集 loss: 2.518192304597868\n",
      "第1轮的第3折：\n",
      "训练集 loss: 2.5007555911591\n",
      "测试集 loss: 2.5121760801835493\n",
      "第1轮的第4折：\n",
      "训练集 loss: 2.4861165626899346\n",
      "测试集 loss: 2.491209493650423\n",
      "第1轮的第5折：\n",
      "训练集 loss: 2.4690725643318014\n",
      "测试集 loss: 2.4683254148576643\n",
      "第1轮的第6折：\n",
      "训练集 loss: 2.4476417331428793\n",
      "测试集 loss: 2.4567126744276995\n",
      "第2轮的第1折：\n",
      "训练集 loss: 2.4210602110082453\n",
      "测试集 loss: 2.4312199912704786\n",
      "第2轮的第2折：\n",
      "训练集 loss: 2.4077808860298635\n",
      "测试集 loss: 2.41906263944986\n",
      "第2轮的第3折：\n",
      "训练集 loss: 2.398388769243147\n",
      "测试集 loss: 2.41259841485457\n",
      "第2轮的第4折：\n",
      "训练集 loss: 2.397047152552571\n",
      "测试集 loss: 2.4044536777309604\n",
      "第2轮的第5折：\n",
      "训练集 loss: 2.389288325743242\n",
      "测试集 loss: 2.3966387618671763\n",
      "第2轮的第6折：\n",
      "训练集 loss: 2.372035514057933\n",
      "测试集 loss: 2.3938787950502407\n",
      "第3轮的第1折：\n",
      "训练集 loss: 2.3670885936363595\n",
      "测试集 loss: 2.3810817961926225\n",
      "第3轮的第2折：\n",
      "训练集 loss: 2.358098377214445\n",
      "测试集 loss: 2.3811613179586986\n",
      "第3轮的第3折：\n",
      "训练集 loss: 2.3503227817428693\n",
      "测试集 loss: 2.3700029499880917\n",
      "第3轮的第4折：\n",
      "训练集 loss: 2.3469226140242356\n",
      "测试集 loss: 2.374108921397816\n",
      "第3轮的第5折：\n",
      "训练集 loss: 2.332383739364731\n",
      "测试集 loss: 2.3483906792594005\n",
      "第3轮的第6折：\n",
      "训练集 loss: 2.3233934685900497\n",
      "测试集 loss: 2.3481683847787496\n"
     ]
    }
   ],
   "source": [
    "class Residual(nn.Module):\n",
    "    \"\"\"Fully-connected residual block: Y = BN(ReLU(Linear(X))) + X,\n",
    "    with a Linear projection on the skip path when the width changes.\n",
    "    \"\"\"\n",
    "    def __init__(self, num_inputs, num_outputs):\n",
    "        super(Residual, self).__init__()\n",
    "        self.middle_L = nn.Linear(num_inputs, num_outputs)\n",
    "        # Fix: nn.ReLU's only argument is `inplace`; passing num_outputs\n",
    "        # silently enabled in-place mode instead of setting a width.\n",
    "        self.middle_R = nn.ReLU()\n",
    "        if num_inputs != num_outputs:\n",
    "            self.right = nn.Linear(num_inputs, num_outputs)  # projection shortcut\n",
    "        else:\n",
    "            self.right = None  # identity shortcut\n",
    "        self.middle_B = nn.BatchNorm1d(num_outputs)\n",
    "\n",
    "    def forward(self, X):\n",
    "        Y = self.middle_B(self.middle_R(self.middle_L(X)))\n",
    "        if self.right is not None:  # explicit None check, not Module truthiness\n",
    "            X = self.right(X)\n",
    "        return Y + X\n",
    "\n",
    "class build_model(nn.Module):\n",
    "    \"\"\"Stack of 9 residual blocks tapering 1024 -> 64, then Linear+Softmax.\n",
    "\n",
    "    `dp` is accepted for interface parity with the later Dropout variant\n",
    "    but is unused in this version (no Dropout layers yet).\n",
    "    \"\"\"\n",
    "    def __init__(self, num_inputs, num_outputs, dp=0.5):\n",
    "        super(build_model, self).__init__()\n",
    "        self.net = nn.Sequential()\n",
    "        self.net.add_module('Residual1', Residual(num_inputs, 1024))\n",
    "        self.net.add_module('Residual2', Residual(1024, 512))\n",
    "        self.net.add_module('Residual3', Residual(512, 512))\n",
    "        self.net.add_module('Residual4', Residual(512, 256))\n",
    "        self.net.add_module('Residual5', Residual(256, 256))\n",
    "        self.net.add_module('Residual6', Residual(256, 128))\n",
    "        self.net.add_module('Residual7', Residual(128, 128))\n",
    "        self.net.add_module('Residual8', Residual(128, 64))\n",
    "        self.net.add_module('Residual9', Residual(64, 64))\n",
    "        self.net.add_module('Linear-out', nn.Linear(64, num_outputs))\n",
    "        self.net.add_module('Softmax', nn.Softmax(dim=-1))\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.net(x)\n",
    "\n",
    "net = build_model(num_inputs, num_outputs).to(device)\n",
    "\n",
    "optimizer = torch.optim.Adam(net.parameters(), lr = 0.001)  # much lower lr for the deep net\n",
    "\n",
    "sf_crime_4 = sf_crime(num_epochs = 3, k_fold_num = 6, batch_size = 1024, k_fold = True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "误差降得快，训练效果好，进行输出"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Retrain on the full training set (no k-fold) and write submission v2.\n",
    "net = build_model(num_inputs, num_outputs).to(device)\n",
    "sf_crime_5 = sf_crime(num_epochs = 75, k_fold_num = 5, batch_size = 128, k_fold = False)\n",
    "sf_crime_5.write('v2')\n",
    "# These results were produced on Kaggle's GPU with the same parameters;\n",
    "# rerunning on CPU takes too long, and the code/hardware have since\n",
    "# changed, so the run is not reproduced here."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "提交，发现过拟合"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 降低过拟合\n",
    "\n",
    "增加Dropout层"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第1轮的第1折：\n",
      "训练集 loss: 2.531314628441017\n",
      "测试集 loss: 2.537703082278058\n",
      "第1轮的第2折：\n",
      "训练集 loss: 2.522075258935248\n",
      "测试集 loss: 2.5217196908030477\n",
      "第1轮的第3折：\n",
      "训练集 loss: 2.51781182089052\n",
      "测试集 loss: 2.5205326630518985\n",
      "第1轮的第4折：\n",
      "训练集 loss: 2.5116197255941537\n",
      "测试集 loss: 2.5127988778627834\n",
      "第1轮的第5折：\n",
      "训练集 loss: 2.499079208440714\n",
      "测试集 loss: 2.499604763684573\n",
      "第1轮的第6折：\n",
      "训练集 loss: 2.4888608285597154\n",
      "测试集 loss: 2.496323132014775\n",
      "第2轮的第1折：\n",
      "训练集 loss: 2.4723022034118225\n",
      "测试集 loss: 2.4704653833295915\n",
      "第2轮的第2折：\n",
      "训练集 loss: 2.4573255052099694\n",
      "测试集 loss: 2.4650287711536967\n",
      "第2轮的第3折：\n",
      "训练集 loss: 2.450022388338209\n",
      "测试集 loss: 2.4555471510320275\n",
      "第2轮的第4折：\n",
      "训练集 loss: 2.44492984051471\n",
      "测试集 loss: 2.4528587331305016\n",
      "第2轮的第5折：\n",
      "训练集 loss: 2.4407840758770494\n",
      "测试集 loss: 2.4463681807884803\n",
      "第2轮的第6折：\n",
      "训练集 loss: 2.4352504493473295\n",
      "测试集 loss: 2.4402960563873077\n",
      "第3轮的第1折：\n",
      "训练集 loss: 2.429513962952407\n",
      "测试集 loss: 2.4368271877715637\n",
      "第3轮的第2折：\n",
      "训练集 loss: 2.422563133373127\n",
      "测试集 loss: 2.4347961399105045\n",
      "第3轮的第3折：\n",
      "训练集 loss: 2.4207374719473034\n",
      "测试集 loss: 2.425267246219662\n",
      "第3轮的第4折：\n",
      "训练集 loss: 2.416952591342526\n",
      "测试集 loss: 2.4215744291985786\n",
      "第3轮的第5折：\n",
      "训练集 loss: 2.406511242072899\n",
      "测试集 loss: 2.4143979249300656\n",
      "第3轮的第6折：\n",
      "训练集 loss: 2.4068318183605486\n",
      "测试集 loss: 2.4127483217866272\n"
     ]
    }
   ],
   "source": [
    "class Residual(nn.Module):\n",
    "    \"\"\"Fully-connected residual block: Y = BN(ReLU(Linear(X))) + X,\n",
    "    with a Linear projection on the skip path when the width changes.\n",
    "    \"\"\"\n",
    "    def __init__(self, num_inputs, num_outputs):\n",
    "        super(Residual, self).__init__()\n",
    "        self.middle_L = nn.Linear(num_inputs, num_outputs)\n",
    "        # Fix: nn.ReLU's only argument is `inplace`; passing num_outputs\n",
    "        # silently enabled in-place mode instead of setting a width.\n",
    "        self.middle_R = nn.ReLU()\n",
    "        if num_inputs != num_outputs:\n",
    "            self.right = nn.Linear(num_inputs, num_outputs)  # projection shortcut\n",
    "        else:\n",
    "            self.right = None  # identity shortcut\n",
    "        self.middle_B = nn.BatchNorm1d(num_outputs)\n",
    "\n",
    "    def forward(self, X):\n",
    "        Y = self.middle_B(self.middle_R(self.middle_L(X)))\n",
    "        if self.right is not None:  # explicit None check, not Module truthiness\n",
    "            X = self.right(X)\n",
    "        return Y + X\n",
    "\n",
    "class build_model(nn.Module):\n",
    "    \"\"\"Residual MLP tapering 1024 -> 64 with two Dropout layers (p=dp)\n",
    "    inserted to reduce overfitting, then Linear+Softmax over the classes.\n",
    "    \"\"\"\n",
    "    def __init__(self, num_inputs, num_outputs, dp=0.5):\n",
    "        super(build_model, self).__init__()\n",
    "        self.net = nn.Sequential()\n",
    "        self.net.add_module('Residual1', Residual(num_inputs, 1024))\n",
    "        self.net.add_module('Residual2', Residual(1024, 512))\n",
    "        self.net.add_module('Residual3', Residual(512, 512))\n",
    "        self.net.add_module('Residual4', Residual(512, 256))\n",
    "        self.net.add_module('Dropout1', nn.Dropout(dp))\n",
    "        self.net.add_module('Residual5', Residual(256, 256))\n",
    "        self.net.add_module('Residual6', Residual(256, 128))\n",
    "        self.net.add_module('Residual7', Residual(128, 128))\n",
    "        self.net.add_module('Residual8', Residual(128, 64))\n",
    "        self.net.add_module('Dropout2', nn.Dropout(dp))\n",
    "        self.net.add_module('Residual9', Residual(64, 64))\n",
    "        self.net.add_module('Linear-out', nn.Linear(64, num_outputs))\n",
    "        self.net.add_module('Softmax', nn.Softmax(dim=-1))\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.net(x)\n",
    "\n",
    "net = build_model(num_inputs, num_outputs).to(device)\n",
    "\n",
    "optimizer = torch.optim.Adam(net.parameters(), lr = 0.001)\n",
    "\n",
    "sf_crime_6 = sf_crime(num_epochs = 3, k_fold_num = 6, batch_size = 1024, k_fold = True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "训练效果好，进行输出"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Retrain on the full training set (no k-fold) and write submission v4.\n",
    "net = build_model(num_inputs, num_outputs).to(device)\n",
    "sf_crime_7 = sf_crime(num_epochs = 100, k_fold_num = 5, batch_size = 128, k_fold = False)\n",
    "sf_crime_7.write('v4')\n",
    "# These results were produced on Kaggle's GPU with the same parameters;\n",
    "# rerunning on CPU takes too long, and the code/hardware have since\n",
    "# changed, so the run is not reproduced here.\n",
    "# notebook: https://www.kaggle.com/doublepoi/a-nn-with-residual-v4\n",
    "# A trained net has been kept under the /working directory."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "提交"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.6.10 64-bit ('Pytorch-learn': conda)",
   "language": "python",
   "name": "python361064bitpytorchlearnconda080df47efea24539a61202fa66a72562"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
