{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Project helpers (DataLoader, ASCNN, ASCNNAE, ASSVM, get_meta_data, ...).\n",
    "from utils import *\n",
    "\n",
    "import torch\n",
    "from torch import nn\n",
    "from torch import optim\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib\n",
    "from seaborn import heatmap\n",
    "# Explicit metric imports instead of a star import: makes the origin of\n",
    "# these names obvious and avoids namespace pollution.\n",
    "from sklearn.metrics import accuracy_score, roc_auc_score, classification_report\n",
    "from collections import Counter\n",
    "from tqdm import tqdm\n",
    "from time import time\n",
    "\n",
    "# Global plotting style.\n",
    "matplotlib.rcParams[\"font.family\"] = \"Times New Roman\"\n",
    "matplotlib.rcParams[\"legend.fontsize\"] = 16\n",
    "matplotlib.rcParams[\"xtick.major.size\"] = 12\n",
    "matplotlib.rcParams[\"ytick.major.size\"] = 12"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Train CNN"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## prepare data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "meta data has been dumped at \u001b[32m./data/meta.json\u001b[0m\n",
      "+-------+------+---------------+---------------+\n",
      "|       | size | positive size | negative size |\n",
      "+-------+------+---------------+---------------+\n",
      "| train | 770  |      472      |      298      |\n",
      "|  test | 194  |      119      |       75      |\n",
      "+-------+------+---------------+---------------+\n"
     ]
    }
   ],
   "source": [
    "# Build the meta file describing the train/test split (80/20).\n",
    "# The printed size table is emitted by get_meta_data itself.\n",
    "get_meta_data(\n",
    "    meta_data_file=\"./data/meta.json\",\n",
    "    train_ratio=0.8\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## check loader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:01,  1.04s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "bx shape torch.Size([64, 1, 128, 100])\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "13it [00:11,  1.13it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "check out, iteration number: 13\n",
      "Counter({0: 472, 1: 298}) ratio: 0.38701298701298703 0.612987012987013\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Sanity-check the loader: count batches (iter_num is reused by the\n",
    "# training loops below) and collect the label distribution.\n",
    "iter_num = 0\n",
    "labels = []\n",
    "loader = tqdm(DataLoader(\n",
    "        meta_path=\"./data/meta.json\",\n",
    "        batch_size=64,\n",
    "        train=True,\n",
    "        shuffle=True,\n",
    "        mode=\"logmel\"\n",
    "    ))\n",
    "\n",
    "for (bx, by) in loader:\n",
    "    labels.extend(by.tolist())\n",
    "    iter_num += 1\n",
    "    if iter_num == 1:\n",
    "        print(\"bx shape\", bx.shape)\n",
    "print(\"check out, iteration number: {}\".format(iter_num))\n",
    "c = Counter(labels)\n",
    "# label 0 = positive/normal, label 1 = negative/abnormal.\n",
    "p_n, n_n = c[0], c[1]\n",
    "# r1/r2 are the OPPOSITE class frequencies: used below as inverse-frequency\n",
    "# class weights for CrossEntropyLoss (weight[0] = freq of class 1, and vice versa).\n",
    "r1, r2 = n_n / (p_n + n_n), p_n / (p_n + n_n)\n",
    "print(c, \"ratio:\", r1, r2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# CNN classifier from utils; lace sizes are model hyper-parameters\n",
    "# -- TODO(review): confirm their meaning in utils.\n",
    "model = ASCNN(\n",
    "    left_lace_size=3,\n",
    "    right_lace_size=7\n",
    ")\n",
    "# Inverse-frequency class weights computed in the loader-check cell.\n",
    "loss_func = nn.CrossEntropyLoss(weight=torch.tensor([r1, r2]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Adam with default betas; small learning rate for stable fine-tuning.\n",
    "optimizer = optim.Adam(model.parameters(), lr=5e-5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "loss=0.31326, acc=1.0: 100%|██████████| 13/13 [00:13<00:00,  1.07s/it]\n",
      "loss=0.31326, acc=1.0: 100%|██████████| 13/13 [00:13<00:00,  1.05s/it]\n",
      "loss=0.31326, acc=1.0: 100%|██████████| 13/13 [00:14<00:00,  1.11s/it]\n",
      "loss=0.31358, acc=1.0: 100%|██████████| 13/13 [00:14<00:00,  1.11s/it]\n",
      "loss=0.31326, acc=1.0: 100%|██████████| 13/13 [00:14<00:00,  1.09s/it]\n",
      "loss=0.31326, acc=1.0: 100%|██████████| 13/13 [00:14<00:00,  1.09s/it]\n",
      "loss=0.31326, acc=1.0: 100%|██████████| 13/13 [00:14<00:00,  1.09s/it]\n",
      "loss=0.31326, acc=1.0: 100%|██████████| 13/13 [00:14<00:00,  1.09s/it]\n",
      "loss=0.31326, acc=1.0: 100%|██████████| 13/13 [00:14<00:00,  1.10s/it]\n",
      "loss=0.31326, acc=1.0: 100%|██████████| 13/13 [00:14<00:00,  1.11s/it]\n"
     ]
    }
   ],
   "source": [
    "# Train the CNN classifier. iter_num (batch count) comes from the\n",
    "# loader-check cell; zip() stops each epoch after that many batches.\n",
    "model.train()  # fix: ensure BN/dropout layers are in training mode on re-runs\n",
    "for epoch in range(10):\n",
    "    loader = DataLoader(\n",
    "        meta_path=\"./data/meta.json\",\n",
    "        batch_size=64,\n",
    "        train=True,\n",
    "        shuffle=True,\n",
    "        mode=\"logmel\"\n",
    "    )\n",
    "    iter_wrapper = tqdm(range(iter_num))\n",
    "    for i, (bx, by) in zip(iter_wrapper, loader):\n",
    "        output: torch.Tensor = model(bx)\n",
    "        loss: torch.Tensor = loss_func(output, by)\n",
    "        prelab = output.argmax(1)\n",
    "        # Batch accuracy, for the progress bar only.\n",
    "        acc = accuracy_score(by.flatten(), prelab.flatten())\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        iter_wrapper.set_description_str(desc=\"loss={}, acc={}\".format(\n",
    "            round(loss.item(), 5), round(acc, 3)\n",
    "        ))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "4it [00:02,  1.88it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.0\n",
      "1.0\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0      1.000     1.000     1.000       119\n",
      "           1      1.000     1.000     1.000        75\n",
      "\n",
      "    accuracy                          1.000       194\n",
      "   macro avg      1.000     1.000     1.000       194\n",
      "weighted avg      1.000     1.000     1.000       194\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the CNN on the held-out split.\n",
    "model.eval()\n",
    "loader = DataLoader(\n",
    "    meta_path=\"./data/meta.json\",\n",
    "    batch_size=64,\n",
    "    train=False,\n",
    "    shuffle=True,\n",
    "    mode=\"logmel\"\n",
    ")\n",
    "\n",
    "y_pred = []  # fix: was misleadingly named `acc` while holding predictions\n",
    "y_true = []\n",
    "# no_grad: inference only -- avoids building the autograd graph.\n",
    "with torch.no_grad():\n",
    "    for bx, by in tqdm(loader):\n",
    "        output = model(bx)\n",
    "        prelab = output.argmax(1)\n",
    "        y_pred.extend(prelab.tolist())\n",
    "        y_true.extend(by.tolist())\n",
    "\n",
    "print(accuracy_score(y_true, y_pred))\n",
    "# AUC computed on hard labels (not scores) -- coarse but comparable across runs.\n",
    "print(roc_auc_score(y_true, y_pred))\n",
    "print(classification_report(y_true, y_pred, digits=3))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist only the weights (state_dict), not the full pickled module.\n",
    "torch.save(model.state_dict(), \"./model/Haier/ascnn.pkl\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# AE"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## visualise some heatmap"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Convolutional auto-encoder. NOTE(review): this loads an existing\n",
    "# checkpoint and continues training -- on a fresh run the file must\n",
    "# already exist; confirm that is intended.\n",
    "model = ASCNNAE()\n",
    "model.load_state_dict(torch.load(\"./model/Haier/ascnnae.pkl\"))\n",
    "model.train()\n",
    "loss_func = nn.MSELoss(reduction='mean')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fresh optimizer over the auto-encoder parameters.\n",
    "optimizer = optim.Adam(model.parameters(), lr=1e-4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "loss=0.01132: 100%|██████████| 13/13 [01:16<00:00,  5.88s/it]\n",
      "loss=0.02272: 100%|██████████| 13/13 [01:17<00:00,  6.00s/it]\n",
      "loss=0.00697: 100%|██████████| 13/13 [01:15<00:00,  5.81s/it]\n",
      "loss=0.02092: 100%|██████████| 13/13 [01:16<00:00,  5.91s/it]\n",
      "loss=0.00761: 100%|██████████| 13/13 [01:17<00:00,  5.97s/it]\n"
     ]
    }
   ],
   "source": [
    "# Train the auto-encoder to reconstruct NORMAL (label 0) samples only.\n",
    "# Fix: removed the dead 'negative loss' branch -- a forward pass on the\n",
    "# abnormal samples whose negated MSE never reached the optimizer\n",
    "# (loss = p_loss), yet which still updated BatchNorm running statistics\n",
    "# with abnormal data because the model is in train() mode.\n",
    "for epoch in range(5):\n",
    "    loader = DataLoader(\n",
    "        meta_path=\"./data/meta.json\",\n",
    "        batch_size=64,\n",
    "        train=True,\n",
    "        shuffle=True,\n",
    "        mode=\"logmel\"\n",
    "    )\n",
    "    iter_wrapper = tqdm(range(iter_num))\n",
    "    for i, (bx, by) in zip(iter_wrapper, loader):\n",
    "        p_bx = bx[by == 0]  # normal samples of this batch\n",
    "        _, p_output = model(p_bx)\n",
    "        loss: torch.Tensor = loss_func(p_output, p_bx)\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        iter_wrapper.set_description_str(desc=\"loss={}\".format(\n",
    "            round(loss.item(), 5)\n",
    "        ))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save auto-encoder weights (overwrites the checkpoint loaded above).\n",
    "torch.save(model.state_dict(), \"./model/Haier/ascnnae.pkl\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# visualise\n",
    "# Grab one training batch and split it by label; p_bx / n_bx are reused\n",
    "# by the reconstruction-heatmap cell below.\n",
    "loader = DataLoader(\n",
    "        meta_path=\"./data/meta.json\",\n",
    "        batch_size=64,\n",
    "        train=True,\n",
    "        shuffle=True,\n",
    "        mode=\"logmel\"\n",
    "    )\n",
    "\n",
    "for (bx, by) in loader:\n",
    "    p_bx = bx[by == 0]    # normal\n",
    "    n_bx = bx[by == 1]    # abnormal\n",
    "    break\n",
    "\n",
    "p_bx.shape, n_bx.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reconstruct one normal and one abnormal sample and show the input\n",
    "# feature, the reconstruction, and their residual side by side.\n",
    "model = ASCNNAE()\n",
    "model.load_state_dict(torch.load(\"./model/Haier/ascnnae.pkl\"))\n",
    "model.eval()  # fix: inference must not use batch statistics (batch of 1)\n",
    "\n",
    "p_x = p_bx[0].unsqueeze(0)\n",
    "n_x = n_bx[0].unsqueeze(0)\n",
    "\n",
    "with torch.no_grad():  # no autograd graph needed for visualisation\n",
    "    _, re_p_x = model(p_x)\n",
    "    _, re_n_x = model(n_x)\n",
    "\n",
    "p_x = p_x.squeeze().numpy()\n",
    "n_x = n_x.squeeze().numpy()\n",
    "re_p_x = re_p_x.squeeze().numpy()\n",
    "re_n_x = re_n_x.squeeze().numpy()\n",
    "\n",
    "plt.figure(figsize=[24, 12])\n",
    "plt.subplot(2,3,1)\n",
    "plt.title(\"feature of normal wave\")\n",
    "heatmap(p_x)\n",
    "\n",
    "plt.subplot(2,3,2)\n",
    "plt.title(\"reconstructed feature of normal wave\")\n",
    "heatmap(re_p_x)\n",
    "\n",
    "plt.subplot(2,3,3)\n",
    "plt.title(\"residual normal\")\n",
    "heatmap(p_x - re_p_x, cmap=\"Greys\")\n",
    "\n",
    "plt.subplot(2,3,4)\n",
    "plt.title(\"feature of abnormal wave\")\n",
    "heatmap(n_x)\n",
    "\n",
    "plt.subplot(2,3,5)\n",
    "plt.title(\"reconstructed feature of abnormal wave\")\n",
    "heatmap(re_n_x)\n",
    "\n",
    "plt.subplot(2,3,6)\n",
    "plt.title(\"residual abnormal\")\n",
    "heatmap(n_x - re_n_x, cmap=\"Greys\");\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# AE+CNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ASCNNAE(\n",
       "  (Encoder): Sequential(\n",
       "    (0): Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (3): ELU(alpha=1.0)\n",
       "    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "    (5): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (6): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (7): ELU(alpha=1.0)\n",
       "    (8): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (9): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (10): ELU(alpha=1.0)\n",
       "    (11): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (12): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (13): ELU(alpha=1.0)\n",
       "    (14): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (15): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (16): ELU(alpha=1.0)\n",
       "    (17): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "    (18): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "  )\n",
       "  (Decoder): Sequential(\n",
       "    (0): ConvTranspose2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (1): ELU(alpha=1.0)\n",
       "    (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (3): ConvTranspose2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))\n",
       "    (4): ELU(alpha=1.0)\n",
       "    (5): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (6): ConvTranspose2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (7): ELU(alpha=1.0)\n",
       "    (8): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (9): ConvTranspose2d(64, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (10): ELU(alpha=1.0)\n",
       "    (11): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (12): ConvTranspose2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (13): ConvTranspose2d(32, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))\n",
       "    (14): ELU(alpha=1.0)\n",
       "    (15): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (16): ConvTranspose2d(16, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (17): Sigmoid()\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# visualise\n",
    "# Reload the trained auto-encoder for feature extraction; eval() freezes\n",
    "# BatchNorm running statistics. The last expression prints the module tree.\n",
    "model = ASCNNAE()\n",
    "model.load_state_dict(torch.load(\"./model/Haier/ascnnae.pkl\"))\n",
    "model.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((770, 25600), (770,))"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build SVM training features: flattened log-mel input concatenated with\n",
    "# the flattened auto-encoder reconstruction residual, one row per sample.\n",
    "# (The unused bottle_neck variable and a commented-out line were removed.)\n",
    "loader = DataLoader(\n",
    "    meta_path=\"./data/meta.json\",\n",
    "    batch_size=64,\n",
    "    train=True,\n",
    "    shuffle=True,\n",
    "    mode=\"logmel\"\n",
    ")\n",
    "\n",
    "train_x = []\n",
    "train_y = []\n",
    "with torch.no_grad():  # feature extraction only -- no gradients needed\n",
    "    for (bx, by) in loader:\n",
    "        _, reconstruct = model(bx)\n",
    "        residual = bx - reconstruct\n",
    "        residual = residual.flatten(1)\n",
    "        bx = bx.flatten(1)\n",
    "        features = torch.cat([bx, residual], dim=1).tolist()\n",
    "        train_x.extend(features)\n",
    "        train_y.extend(by.flatten(0).tolist())\n",
    "\n",
    "train_x = np.array(train_x)\n",
    "train_y = np.array(train_y)\n",
    "\n",
    "train_x.shape, train_y.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SVM wrapper from utils; C is presumably the usual SVM penalty term\n",
    "# (C=100 = weak regularization) -- TODO confirm kernel choice in utils.\n",
    "classifier = ASSVM(C=100)\n",
    "classifier.fit(train_x, train_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((194, 25600), (194,))"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build SVM test features with the same recipe as the training cell:\n",
    "# [flattened input, flattened reconstruction residual].\n",
    "loader = DataLoader(\n",
    "    meta_path=\"./data/meta.json\",\n",
    "    batch_size=64,\n",
    "    train=False,\n",
    "    shuffle=True,\n",
    "    mode=\"logmel\"\n",
    ")\n",
    "\n",
    "test_x = []\n",
    "test_y = []\n",
    "with torch.no_grad():  # feature extraction only -- no gradients needed\n",
    "    for (bx, by) in loader:\n",
    "        _, reconstruct = model(bx)\n",
    "        residual = bx - reconstruct\n",
    "        residual = residual.flatten(1)\n",
    "        bx = bx.flatten(1)\n",
    "        features = torch.cat([bx, residual], dim=1).tolist()\n",
    "        test_x.extend(features)\n",
    "        test_y.extend(by.flatten(0).tolist())\n",
    "\n",
    "test_x = np.array(test_x)\n",
    "test_y = np.array(test_y)\n",
    "\n",
    "test_x.shape, test_y.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "acc :  1.0\n",
      "auc :  1.0\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0      1.000     1.000     1.000       119\n",
      "           1      1.000     1.000     1.000        75\n",
      "\n",
      "    accuracy                          1.000       194\n",
      "   macro avg      1.000     1.000     1.000       194\n",
      "weighted avg      1.000     1.000     1.000       194\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Test-set metrics; classifier.report() prints a classification report\n",
    "# (see the cell output).\n",
    "print(\"acc : \", classifier.score(test_x, test_y))\n",
    "print(\"auc : \", classifier.roc_auc(test_x, test_y))\n",
    "classifier.report(test_x, test_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this pickles the whole module object (not a state_dict),\n",
    "# which ties the checkpoint to the exact class definition/import path.\n",
    "torch.save(model, \"./model/Haier/AEResNet-lightning/autoencoder.pkl\")\n",
    "classifier.save_model(\"./model/Haier/AEResNet-lightning/classifier.joblib\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## time test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rough single-inference latency for one (1, 1, 128, 100) input.\n",
    "# NOTE(review): single cold run, no warm-up or averaging -- indicative only.\n",
    "X = torch.randn(1, 1, 128, 100)\n",
    "start = time()\n",
    "model(X)\n",
    "print(time() - start)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# AE+CNN (0db id_00 dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:02,  2.49s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "bx shape : torch.Size([64, 1, 128, 100]), by shape : torch.Size([64])\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "18it [00:43,  2.39s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Counter({0: 808, 1: 325}) ratio: 0.2868490732568402 0.7131509267431597\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Second experiment (0db id_00 split): load a pretrained auto-encoder,\n",
    "# then count batches (iter_num, reused by the training loop below) and\n",
    "# the label distribution.\n",
    "autoencoder = ASCNNAE()\n",
    "autoencoder.load_state_dict(torch.load(\"./model/ascnnae_1.0.pkl\"))\n",
    "\n",
    "loader = DataLoader(\n",
    "        meta_path=\"./data/meta_0db_id_00.json\",\n",
    "        batch_size=64,\n",
    "        train=True,\n",
    "        shuffle=True,\n",
    "        mode=\"logmel\",\n",
    "        n_flag=\"abnormal\",\n",
    "        p_flag=\"normal\"\n",
    "    )\n",
    "\n",
    "iter_num = 0\n",
    "labels = []\n",
    "for (bx, by) in tqdm(loader):\n",
    "    iter_num += 1\n",
    "    labels.extend(by.flatten().tolist())\n",
    "    if iter_num == 1:\n",
    "        print(\"bx shape : {}, by shape : {}\".format(bx.shape, by.shape))\n",
    "\n",
    "c = Counter(labels)\n",
    "# label 0 = normal (p_flag), label 1 = abnormal (n_flag).\n",
    "p_n, n_n = c[0], c[1]\n",
    "# Opposite-class frequencies, used below as CrossEntropyLoss class weights.\n",
    "r1, r2 = n_n / (p_n + n_n), p_n / (p_n + n_n)\n",
    "print(c, \"ratio:\", r1, r2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 3-channel CNN: the input will be [bx, reconstruction, residual]\n",
    "# stacked along the channel dimension. Only the classifier is optimized.\n",
    "classifier = ASCNN(input_channel=3)\n",
    "optimizer = optim.Adam(classifier.parameters(), lr=1e-4)\n",
    "loss_func = nn.CrossEntropyLoss(weight=torch.tensor([r1, r2]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "loss=0.316, acc=1.0: 100%|██████████| 18/18 [02:43<00:00,  9.06s/it]\n",
      "loss=0.35, acc=0.978: 100%|██████████| 18/18 [02:41<00:00,  8.97s/it]\n",
      "loss=0.313, acc=1.0: 100%|██████████| 18/18 [02:38<00:00,  8.80s/it]\n",
      "loss=0.318, acc=1.0: 100%|██████████| 18/18 [02:36<00:00,  8.71s/it]\n",
      "loss=0.314, acc=1.0: 100%|██████████| 18/18 [02:39<00:00,  8.84s/it]\n",
      "loss=0.314, acc=1.0: 100%|██████████| 18/18 [02:38<00:00,  8.78s/it]\n",
      "loss=0.313, acc=1.0: 100%|██████████| 18/18 [02:38<00:00,  8.81s/it]\n",
      "loss=0.313, acc=1.0: 100%|██████████| 18/18 [02:37<00:00,  8.78s/it]\n",
      "loss=0.315, acc=1.0: 100%|██████████| 18/18 [02:38<00:00,  8.83s/it]\n",
      "loss=0.314, acc=1.0: 100%|██████████| 18/18 [02:42<00:00,  9.02s/it]\n"
     ]
    }
   ],
   "source": [
    "# Train the classifier on [input, reconstruction, residual] features.\n",
    "# Fix: train/eval modes were swapped. The auto-encoder is frozen (its\n",
    "# parameters are not in the optimizer) so it belongs in eval() -- in\n",
    "# train() mode its BatchNorm running stats would drift every epoch --\n",
    "# while the classifier being optimized must be in train() mode.\n",
    "autoencoder.eval()\n",
    "classifier.train()\n",
    "\n",
    "for epoch in range(10):\n",
    "    loader = DataLoader(\n",
    "        meta_path=\"./data/meta_0db_id_00.json\",\n",
    "        batch_size=64,\n",
    "        train=True,\n",
    "        shuffle=True,\n",
    "        mode=\"logmel\",\n",
    "        n_flag=\"abnormal\",\n",
    "        p_flag=\"normal\"\n",
    "    )\n",
    "\n",
    "    iter_wrapper = tqdm(range(iter_num))\n",
    "    for i, (bx, by) in zip(iter_wrapper, loader):\n",
    "        # The frozen auto-encoder needs no gradients.\n",
    "        with torch.no_grad():\n",
    "            _, re_bx = autoencoder(bx)\n",
    "        residual = re_bx - bx\n",
    "        feature = torch.cat([bx, re_bx, residual], dim=1)\n",
    "        output = classifier(feature)\n",
    "        pre_lab = output.argmax(1)\n",
    "        loss = loss_func(output, by)\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        iter_wrapper.set_description_str(\"loss={}, acc={}\".format(\n",
    "            round(loss.item(), 3), round(accuracy_score(pre_lab.flatten(), by.flatten()), 3)\n",
    "        ))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "5it [00:19,  3.90s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "acc : {} 0.9649122807017544\n",
      "auc : {} 0.953562417397573\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the AE+CNN pipeline on the test split.\n",
    "pre_labels = []\n",
    "true_labels = []\n",
    "\n",
    "autoencoder.eval()\n",
    "classifier.eval()\n",
    "\n",
    "loader = DataLoader(\n",
    "    meta_path=\"./data/meta_0db_id_00.json\",\n",
    "    batch_size=64,\n",
    "    train=False,\n",
    "    shuffle=True,\n",
    "    mode=\"logmel\",\n",
    "    n_flag=\"abnormal\",\n",
    "    p_flag=\"normal\"\n",
    ")\n",
    "\n",
    "with torch.no_grad():  # inference only\n",
    "    for (bx, by) in tqdm(loader):\n",
    "        _, re_bx = autoencoder(bx)\n",
    "        residual = re_bx - bx\n",
    "        feature = torch.cat([bx, re_bx, residual], dim=1)\n",
    "        output = classifier(feature)\n",
    "        pre_lab = output.argmax(1)\n",
    "        pre_labels.extend(pre_lab.flatten().tolist())\n",
    "        true_labels.extend(by.flatten().tolist())\n",
    "\n",
    "# Fix: the original printed a literal '{}' because .format() was never\n",
    "# called on the template string (see the previous cell output).\n",
    "print(\"acc : {}\".format(accuracy_score(true_labels, pre_labels)))\n",
    "print(\"auc : {}\".format(roc_auc_score(true_labels, pre_labels)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): saves full pickled modules (not state_dicts); these\n",
    "# checkpoints only load where the class definitions are importable.\n",
    "torch.save(autoencoder, \"./model/AEResNet0/autoencoder.pkl\")\n",
    "torch.save(classifier, \"./model/AEResNet0/classifier.pkl\")"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "4ce0e62306dd6a5716965d4519ada776f947e6dfc145b604b11307c10277ef29"
  },
  "kernelspec": {
   "display_name": "Python 3.7.6 64-bit",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
