{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "mjqPwkpZLq7k"
   },
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "import torchvision as tv\n",
    "from torchvision import transforms, utils\n",
    "\n",
    "\n",
    "import torch.nn.functional as F\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.autograd import Variable\n",
    "import torchvision.models as models\n",
    "# from torchvision import transforms, utils\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "# from PIL import Image\n",
    "# import numpy as np\n",
    "import torch.optim as optim\n",
    "# import os\n",
    "import time\n",
    "\n",
    "import loadData"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 日志记录模块\n",
    "import logging\n",
    "logger = logging.getLogger(__name__)\n",
    "logger.setLevel(level = logging.INFO)\n",
    "handler = logging.FileHandler(\"./logs/2_Synchronization_Logs/log1245_relu.txt\")\n",
    "handler.setLevel(logging.INFO)\n",
    "formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n",
    "handler.setFormatter(formatter)\n",
    "\n",
    "console = logging.StreamHandler()\n",
    "console.setLevel(logging.INFO)\n",
    "\n",
    "logger.addHandler(handler)\n",
    "logger.addHandler(console)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Record the wall-clock start time; total runtime is logged at the end of training.\n",
     "startTime = time.time()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "oMgidBKKLq7s"
   },
   "outputs": [],
   "source": [
    "transforms = tv.transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "])\n",
    "\n",
    "\n",
    "numOfBatch = 9\n",
    "pixel = 64\n",
    "flag = 0\n",
    "root = f'./{numOfBatch}/data'\n",
    "batchSize = 2\n",
    "train_data = loadData.MyDataset(txt=f'files_train.txt', transform=None)\n",
    "test_data = loadData.MyDataset(txt=f'files_test.txt', transform=None)\n",
    "\n",
    "\n",
    "train_loader = DataLoader(dataset=train_data, batch_size=batchSize, shuffle=True, num_workers=2)\n",
    "test_loader = DataLoader(dataset=test_data, batch_size=batchSize, shuffle=False, num_workers=2)\n",
    "\n",
    "\n",
    "# print(train_loader)\n",
    "# print(test_loader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "6h-TJ16XLq7y",
    "outputId": "d4f641b6-e9d4-4aa4-8672-13c98099eb01"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<torch.utils.data.dataloader.DataLoader at 0x7f672c98c450>"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Bare last expression: shows the DataLoader's repr as the cell output.\n",
     "train_loader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 487
    },
    "colab_type": "code",
    "id": "CSdZoIqDLq7-",
    "outputId": "22fd354b-1be5-4f7d-e772-7873db328b62"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2\n",
      "tensor([[[-0.0193,  0.0178,  0.0277,  ..., -0.4534, -0.3010, -0.0785],\n",
      "         [ 0.0061, -0.0685,  0.0233,  ..., -0.8888, -0.9163, -0.9015],\n",
      "         [ 0.2542,  0.5903,  0.8284,  ...,  0.8976,  0.7912,  0.6770],\n",
      "         ...,\n",
      "         [-0.9518, -0.9368, -0.7362,  ...,  0.0844,  0.3665,  0.5233],\n",
      "         [ 0.6924,  0.3384, -0.0494,  ..., -0.7578, -0.9504, -0.9461],\n",
      "         [ 0.6607,  0.9698,  0.9863,  ...,  0.6249,  0.4010,  0.1975]],\n",
      "\n",
      "        [[-0.0133, -0.0067, -0.0523,  ...,  0.7996,  0.4826,  0.2688],\n",
      "         [ 0.0041, -0.0103, -0.0011,  ...,  0.6169,  0.8347,  0.9538],\n",
      "         [ 0.0567, -0.3440, -0.6901,  ..., -1.0089, -0.8610, -0.6572],\n",
      "         ...,\n",
      "         [-0.0039,  0.2655,  0.6478,  ...,  0.9610,  0.9777,  0.9047],\n",
      "         [-0.5665, -0.7257, -0.9572,  ..., -0.7634, -0.4332, -0.1338],\n",
      "         [ 0.8026,  0.6190,  0.2434,  ..., -0.6020, -0.8721, -0.9643]]],\n",
      "       dtype=torch.float64) tensor([12,  7])\n"
     ]
    }
   ],
   "source": [
    "data = iter(train_loader)\n",
    "\n",
    "# print(type(data))\n",
    "# print(len(data))\n",
    "data_, label = data.next()\n",
    "print(train_loader.batch_size)\n",
    "print(data_, label)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "9ZSua6ALLq8I"
   },
   "outputs": [],
   "source": [
    "class Net(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Net, self).__init__()\n",
    "        self.conv1 = nn.Conv1d(in_channels=160, out_channels=120, kernel_size=2)\n",
    "        self.conv2 = nn.Conv1d(in_channels=120, out_channels=100, kernel_size=2)\n",
    "        self.conv3 = nn.Conv1d(in_channels=100, out_channels=80, kernel_size=2)\n",
    "        self.dropout1 = nn.Dropout(0.5)\n",
    "        self.dropout2 = nn.Dropout(0.5)\n",
    "        \n",
    "#         self.fc1 = nn.Linear(1360, 1024)\n",
    "        self.fc1 = nn.Linear(31760, 10240)\n",
    "        self.fc2 = nn.Linear(10240, 1000)\n",
    "        self.fc3 = nn.Linear(1000, 54)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv1(x)\n",
    "#         x = F.elu(x)\n",
    "        x = F.relu(x)\n",
    "        x = F.max_pool1d(x, 1)\n",
    "        x = self.conv2(x)\n",
    "#         x = F.elu(x)\n",
    "        x = F.relu(x)\n",
    "        x = F.max_pool1d(x, 1)\n",
    "        x = self.conv3(x)\n",
    "#         x = F.elu(x)\n",
    "        x = F.relu(x)\n",
    "        x = F.max_pool1d(x, 1)\n",
    "        x = torch.flatten(x, 1)\n",
    "#         print(\"x的大小为:\", x.shape)\n",
    "        x = self.fc1(x)\n",
    "#         x = F.elu(x)\n",
    "        x = F.relu(x)\n",
    "        x = self.dropout1(x)\n",
    "        x = self.fc2(x)\n",
    "#         x = F.elu(x)\n",
    "        x = F.relu(x)\n",
    "        x = self.dropout2(x)\n",
    "        x = self.fc3(x)\n",
    "        output = F.log_softmax(x, dim=1)\n",
    "        return output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "WBWBdr1LLq82"
   },
   "outputs": [],
   "source": [
     "# Instantiate the network, the loss and the optimizer.\n",
     "net = Net()\n",
     "\n",
     "# NOTE(review): CrossEntropyLoss applies log_softmax internally; confirm the\n",
     "# network's forward() returns raw logits rather than log-probabilities.\n",
     "criterion = nn.CrossEntropyLoss()\n",
     "optimizer = optim.SGD(params=net.parameters(), lr=0.001, momentum=0.9)\n",
     "\n",
     "epochs = 20\n",
     "average_loss_series = []  # per-10-batch average losses, plotted after training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 286
    },
    "colab_type": "code",
    "id": "87bfeNToLq87",
    "outputId": "af954cd8-8959-4212-ff89-482e57c90d4e"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[-0.0220,  0.0337,  0.0161,  ...,  0.7558,  0.5126,  0.3119],\n",
      "         [-0.0053,  0.0234,  0.0235,  ...,  0.7362,  0.8717,  0.9096],\n",
      "         [-0.0341, -0.4607, -0.7435,  ..., -0.5255, -0.3390, -0.0993],\n",
      "         ...,\n",
      "         [-0.9389, -0.8856, -0.6643,  ...,  0.9262,  1.0187,  0.9715],\n",
      "         [-0.3053, -0.7318, -0.9425,  ..., -0.2263,  0.0683,  0.2490],\n",
      "         [ 0.9187,  0.6798,  0.4139,  ..., -0.9936, -0.9952, -0.9650]],\n",
      "\n",
      "        [[ 0.0023, -0.0039,  0.0409,  ...,  0.8294,  1.0393,  1.0135],\n",
      "         [ 0.0139,  0.0183,  0.0167,  ..., -0.6374, -0.2653,  0.0838],\n",
      "         [ 0.9587,  0.7958,  0.4388,  ...,  1.0043,  0.8319,  0.6027],\n",
      "         ...,\n",
      "         [ 0.3104,  0.5367,  0.8230,  ...,  0.1225,  0.4874,  0.7677],\n",
      "         [ 0.4079,  0.1474, -0.2737,  ...,  0.3734, -0.0072, -0.3774],\n",
      "         [ 0.8833,  0.9919,  0.9828,  ...,  0.9365,  1.0167,  0.9903]]],\n",
      "       dtype=torch.float64)\n"
     ]
    }
   ],
   "source": [
    "for a, b in enumerate(train_loader):\n",
    "    inputs, labels = b\n",
    "    print(inputs)\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 338
    },
    "colab_type": "code",
    "id": "vrT4DrYlQ_sf",
    "outputId": "f29983ea-a956-45f1-ff43-cf529516ba11"
   },
   "outputs": [],
   "source": [
    "# s = torch.randn(2, 160,20).double()\n",
    "# conv = nn.Conv1d(in_channels=160, out_channels=120, kernel_size=2)\n",
    "# conv(s)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 1000
    },
    "colab_type": "code",
    "id": "_c6Gwp3ALq9k",
    "outputId": "60864ad5-6b9b-4875-a39f-9600521d3629"
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:17: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
      "[1, 10] loss: 3.979781079292297\n",
      "[1, 10] acc: 0.10000000149011612\n",
      "[1, 20] loss: 3.9945759057998655\n",
      "[1, 20] acc: 0.05000000074505806\n",
      "[1, 30] loss: 4.00864188671112\n",
      "[1, 30] acc: 0.0\n",
      "[1, 40] loss: 3.992899012565613\n",
      "[1, 40] acc: 0.10000000149011612\n",
      "[1, 50] loss: 4.063576006889344\n",
      "[1, 50] acc: 0.0\n",
      "[1, 60] loss: 3.9747734308242797\n",
      "[1, 60] acc: 0.0\n",
      "[1, 70] loss: 4.022980809211731\n",
      "[1, 70] acc: 0.0\n",
      "[1, 80] loss: 3.993206262588501\n",
      "[1, 80] acc: 0.05000000074505806\n",
      "[1, 90] loss: 4.025311851501465\n",
      "[1, 90] acc: 0.05000000074505806\n",
      "[1, 100] loss: 4.053521609306335\n",
      "[1, 100] acc: 0.0\n",
      "[2, 10] loss: 3.7449958086013795\n",
      "[2, 10] acc: 0.75\n",
      "[2, 20] loss: 3.8088069915771485\n",
      "[2, 20] acc: 0.5\n",
      "[2, 30] loss: 3.7312878131866456\n",
      "[2, 30] acc: 0.6000000238418579\n",
      "[2, 40] loss: 3.645274829864502\n",
      "[2, 40] acc: 0.699999988079071\n",
      "[2, 50] loss: 3.5376358032226562\n",
      "[2, 50] acc: 0.6499999761581421\n",
      "[2, 60] loss: 3.6383159160614014\n",
      "[2, 60] acc: 0.6499999761581421\n",
      "[2, 70] loss: 3.7328828096389772\n",
      "[2, 70] acc: 0.550000011920929\n",
      "[2, 80] loss: 3.534365487098694\n",
      "[2, 80] acc: 0.6499999761581421\n",
      "[2, 90] loss: 3.820701837539673\n",
      "[2, 90] acc: 0.3499999940395355\n",
      "[2, 100] loss: 3.531352972984314\n",
      "[2, 100] acc: 0.5\n",
      "[3, 10] loss: 2.546364402770996\n",
      "[3, 10] acc: 0.8500000238418579\n",
      "[3, 20] loss: 1.9869668066501618\n",
      "[3, 20] acc: 0.8999999761581421\n",
      "[3, 30] loss: 2.9506578922271727\n",
      "[3, 30] acc: 0.699999988079071\n",
      "[3, 40] loss: 2.8015838503837585\n",
      "[3, 40] acc: 0.6499999761581421\n",
      "[3, 50] loss: 3.1831583857536314\n",
      "[3, 50] acc: 0.5\n",
      "[3, 60] loss: 2.4654622107744215\n",
      "[3, 60] acc: 0.6499999761581421\n",
      "[3, 70] loss: 2.2072221785783768\n",
      "[3, 70] acc: 0.6499999761581421\n",
      "[3, 80] loss: 2.40392881333828\n",
      "[3, 80] acc: 0.550000011920929\n",
      "[3, 90] loss: 2.3465710878372192\n",
      "[3, 90] acc: 0.550000011920929\n",
      "[3, 100] loss: 3.187343657016754\n",
      "[3, 100] acc: 0.5\n",
      "[4, 10] loss: 0.40561061073094606\n",
      "[4, 10] acc: 0.949999988079071\n",
      "[4, 20] loss: 1.6177967570722103\n",
      "[4, 20] acc: 0.800000011920929\n",
      "[4, 30] loss: 1.7766781456768512\n",
      "[4, 30] acc: 0.75\n",
      "[4, 40] loss: 1.841304892115295\n",
      "[4, 40] acc: 0.699999988079071\n",
      "[4, 50] loss: 1.7165757700800897\n",
      "[4, 50] acc: 0.699999988079071\n",
      "[4, 60] loss: 1.9172921419143676\n",
      "[4, 60] acc: 0.75\n",
      "[4, 70] loss: 1.287261481769383\n",
      "[4, 70] acc: 0.800000011920929\n",
      "[4, 80] loss: 2.265613687038422\n",
      "[4, 80] acc: 0.6499999761581421\n",
      "[4, 90] loss: 1.9131576508283614\n",
      "[4, 90] acc: 0.6499999761581421\n",
      "[4, 100] loss: 2.3322742730379105\n",
      "[4, 100] acc: 0.6499999761581421\n",
      "[5, 10] loss: 0.9175888936966657\n",
      "[5, 10] acc: 0.8500000238418579\n",
      "[5, 20] loss: 0.5453232023864985\n",
      "[5, 20] acc: 0.8500000238418579\n",
      "[5, 30] loss: 1.4083873353898526\n",
      "[5, 30] acc: 0.75\n",
      "[5, 40] loss: 0.5558428511023521\n",
      "[5, 40] acc: 0.8999999761581421\n",
      "[5, 50] loss: 0.686983872205019\n",
      "[5, 50] acc: 0.800000011920929\n",
      "[5, 60] loss: 1.0009116372093558\n",
      "[5, 60] acc: 0.800000011920929\n",
      "[5, 70] loss: 1.8904583172872662\n",
      "[5, 70] acc: 0.75\n",
      "[5, 80] loss: 1.1658931029960513\n",
      "[5, 80] acc: 0.8500000238418579\n",
      "[5, 90] loss: 1.8494055300951004\n",
      "[5, 90] acc: 0.6499999761581421\n",
      "[5, 100] loss: 1.1799492627382278\n",
      "[5, 100] acc: 0.75\n",
      "[6, 10] loss: 1.3225763911381363\n",
      "[6, 10] acc: 0.699999988079071\n",
      "[6, 20] loss: 0.8751333259046078\n",
      "[6, 20] acc: 0.8500000238418579\n",
      "[6, 30] loss: 1.4626854341477156\n",
      "[6, 30] acc: 0.800000011920929\n",
      "[6, 40] loss: 0.9039526212960481\n",
      "[6, 40] acc: 0.8999999761581421\n",
      "[6, 50] loss: 0.6943201089277864\n",
      "[6, 50] acc: 0.8500000238418579\n",
      "[6, 60] loss: 0.8164606474339962\n",
      "[6, 60] acc: 0.8999999761581421\n",
      "[6, 70] loss: 1.5032701638992876\n",
      "[6, 70] acc: 0.6499999761581421\n",
      "[6, 80] loss: 1.6226260579191147\n",
      "[6, 80] acc: 0.699999988079071\n",
      "[6, 90] loss: 1.071003404073417\n",
      "[6, 90] acc: 0.75\n",
      "[6, 100] loss: 0.9301754029467701\n",
      "[6, 100] acc: 0.800000011920929\n",
      "[7, 10] loss: 0.532021064311266\n",
      "[7, 10] acc: 0.949999988079071\n",
      "[7, 20] loss: 1.458830819837749\n",
      "[7, 20] acc: 0.800000011920929\n",
      "[7, 30] loss: 0.5548997213132679\n",
      "[7, 30] acc: 0.8999999761581421\n"
     ]
    }
   ],
   "source": [
    "%matplotlib inline\n",
    "\n",
    "torch.cuda.set_device(1)\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    for epoch in range(epochs):\n",
    "        running_loss = 0.0\n",
    "        running_acc = 0.0\n",
    "\n",
    "        for i, data in enumerate(train_loader):\n",
    "#             print(\"i:\", i )\n",
    "#             print(\"data: \", data)\n",
    "            inputs, labels = data\n",
    "            optimizer.zero_grad()\n",
    "\n",
    "            inputs = inputs.permute(0, 2, 1)\n",
    "            inputs = torch.tensor(inputs, dtype=torch.float32)\n",
    "            outputs = net(inputs)\n",
    "            _, predicted = torch.max(outputs.data, dim=1)\n",
    "            total = labels.size(0)\n",
    "#             print('原来的值:{0}, 预测的值:{1}'.format(labels, predicted))\n",
    "            running_correct = (predicted == labels).sum()\n",
    "            running_acc += running_correct\n",
    "            loss = criterion(outputs, labels)\n",
    "            running_loss += loss.item()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            # 每loopNum个batch打印一次训练状态\n",
    "            loopNum = 10\n",
    "            if i % loopNum == loopNum - 1:\n",
    "                average_loss = running_loss / loopNum\n",
    "                logger.info('[{0}, {1}] loss: {2}'.format(epoch + 1, i + 1, average_loss))\n",
    "                running_acc = running_acc.float()\n",
    "                average_acc = running_acc / loopNum / total\n",
    "                \n",
    "                logger.info('[{0}, {1}] acc: {2}'.format(epoch + 1, i + 1, average_acc))\n",
    "                average_loss_series.append(average_loss)\n",
    "                running_loss = 0.0\n",
    "                running_acc = 0.0\n",
    "\n",
    "    x = range(0, len(average_loss_series))\n",
    "    plt.figure()\n",
    "    plt.plot(x, average_loss_series)\n",
    "    plt.show()\n",
    "    # %%\n",
    "    # 在测试集上测试\n",
    "    realLabel = []\n",
    "    predictedLabel = []\n",
    "\n",
    "\n",
    "    def correct_rate(net, testloader):\n",
    "        correct = 0.0\n",
    "        total = 0.0\n",
    "\n",
    "        for data in testloader:\n",
    "            images, labels = data\n",
    "            realLabel.append([int(label) for label in labels])\n",
    "            images = images.permute(0, 2, 1).float()\n",
    "#             print(\"--------!!---------\", images)\n",
    "#             print(images.shape)\n",
    "            outputs = net(images)\n",
    "\n",
    "            _, predicted = torch.max(outputs.data, dim=1)\n",
    "            predictedLabel.append([int(label) for label in predicted])\n",
    "            total += labels.size(0)\n",
    "            correct += (predicted == labels).sum()\n",
    "#             print(\"predicted的值为：\" , predicted)\n",
    "#             print(\"labels的值为：\" , labels)\n",
    "#             print(\"correct的值为:\", correct)\n",
    "            \n",
    "        logger.info(\"total的值为：\",total)\n",
    "#         return 100 * correct  # / total\n",
    "        return 100 * correct/ total \n",
    "\n",
    "\n",
    "    correct = correct_rate(net, test_loader)\n",
    "    logger.info(f'{len(test_loader) * 3}张测试集中准确率为： {correct}%')\n",
    "\n",
    "    # %%\n",
    "    import itertools\n",
    "    from sklearn.metrics import confusion_matrix\n",
    "\n",
    "    realLabel = list(itertools.chain.from_iterable(realLabel))\n",
    "    predictedLabel = list(itertools.chain.from_iterable(predictedLabel))\n",
    "\n",
    "    cm = confusion_matrix(realLabel, predictedLabel)\n",
    "    logger.info(cm)\n",
    "\n",
    "    logger.info('the running time is', time.time() - startTime)\n",
    "    torch.save(net, f'./models/model_{pixel}_{flag}.pkl')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "A5YnF55DLq90"
   },
   "outputs": [],
   "source": [
    "aa = iter(train_loader)\n",
    "a, b = aa.next()\n",
    "print(a[0].shape)\n",
    "print(a)"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "name": "3_设计并训练神经网络.ipynb",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
