{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"frequency_domain_subnetwork.ipynb","provenance":[],"collapsed_sections":[],"toc_visible":true,"authorship_tag":"ABX9TyPFGQl/7By4TvggRfDH48tL"},"kernelspec":{"display_name":"Python 3","name":"python3"}},"cells":[{"cell_type":"markdown","metadata":{"id":"I90IxZUH75z0"},"source":["## Initial Configuration\n","Some setup before building and running the model"]},{"cell_type":"code","metadata":{"id":"XVQmhHSV6Qm1","executionInfo":{"status":"ok","timestamp":1604415612579,"user_tz":-480,"elapsed":4045,"user":{"displayName":"Chiu Wai Yan","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GjB1UpIXP4S96VOkkVB6pcUqM_SGJWWju3k9tP0Yg=s64","userId":"13481787162485730226"}}},"source":["# some basic things to include\n","import torch\n","import torch.nn as nn\n","from torchvision import datasets, transforms\n","import numpy as np\n","\n","from PIL import Image\n","import matplotlib.pyplot as plt\n","\n","# some preprocessing techniques\n","from scipy.fftpack import dct\n","\n","# this is only required for Google Colab\n","from google.colab import drive"],"execution_count":1,"outputs":[]},{"cell_type":"code","metadata":{"id":"i4I5YTJEdXdo","executionInfo":{"status":"ok","timestamp":1604415662519,"user_tz":-480,"elapsed":53979,"user":{"displayName":"Chiu Wai Yan","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GjB1UpIXP4S96VOkkVB6pcUqM_SGJWWju3k9tP0Yg=s64","userId":"13481787162485730226"}},"outputId":"e837b124-27eb-458a-8304-0c731f8e8e2c","colab":{"base_uri":"https://localhost:8080/"}},"source":["!gdown --id 1gjbSZV5NjjIVOqSP-yTCqXcGPX9PnYn5\n","!unzip -q '/content/MM17-WeiboRumorSet.zip'"],"execution_count":2,"outputs":[{"output_type":"stream","text":["Downloading...\n","From: https://drive.google.com/uc?id=1gjbSZV5NjjIVOqSP-yTCqXcGPX9PnYn5\n","To: /content/MM17-WeiboRumorSet.zip\n","1.35GB [00:20, 65.5MB/s]\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"4UF1Dk0M-m2u"},"source":["![freq.png]()"]},{"cell_type":"markdown","metadata":{"id":"f_QrzR2eeflR"},"source":["## DCT and DFT\n","\n","DCT will be used to preprocess the input data. \\\n","We want to retrieve information in the frequency domain, rather than pixels.\n","\n","After that, we apply DFT to get the frequency information of frequency domain."]},{"cell_type":"code","metadata":{"id":"9u08jdFbehnl","executionInfo":{"status":"ok","timestamp":1604415662519,"user_tz":-480,"elapsed":53975,"user":{"displayName":"Chiu Wai Yan","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GjB1UpIXP4S96VOkkVB6pcUqM_SGJWWju3k9tP0Yg=s64","userId":"13481787162485730226"}}},"source":["'''\n","Usage:\n","as image preprocessor by calling DCT() as a transform\n","\n","input: 128*128*3 PIL image\n","output: 64*256 torch array (histogram)\n","\n","[128*128] => (crop) => [256 * [8*8]] => (DCT_2d) => [256 * [8 * 8]] => reshape => [256 * 64]  \n","'''\n","class DCT(object):\n","    def __init__(self):\n","        self.BLOCK_HEIGHT = 8\n","        self.BLOCK_WIDTH = 8\n","        self.BLOCK_SIZE = (self.BLOCK_HEIGHT, self.BLOCK_WIDTH)\n","\n","    def div_block(self, img, block_size):\n","        img_height = img.height\n","        img_width = img.width\n","        block_height = block_size[0]\n","        block_width = block_size[1]\n","        assert(img_height % block_height == 0)\n","        assert(img_width % block_width == 0)\n","\n","        blocks = []\n","        for i in range(0,img_height,block_height):\n","            for j in range(0,img_width,block_width):\n","                box = (j, i, j+block_width, i+block_height)\n","                block = np.array(img.crop(box))\n","                blocks.append(block)\n","        return np.array(blocks)\n","\n","    def dct2(self, array_2d):\n","        return dct(dct(array_2d.T, norm = 'ortho').T, norm = 'ortho')\n","\n","    def _dct2(self, array_2d):\n","        return dct(dct(array_2d, norm = 'ortho').T, norm = 'ortho').T\n","\n","    def __call__(self, img):\n","        image = img\n","        blocks = self.div_block(image, self.BLOCK_SIZE)\n","        b_blocks, g_blocks, r_blocks = blocks[:, :, :, 0], blocks[:, :, :, 1], blocks[:, :, :, 2]\n","        test_blocks = (b_blocks + g_blocks + r_blocks) / 3 # naive greyscale\n","        result = np.array([self._dct2(test_block) for test_block in test_blocks])\n","        # return a torch.tensor\n","        return torch.from_numpy(result.reshape(256, 64).T).float()\n","\n","    def __repr__(self):\n","        return \"Simply DCT. What do you expect?\"\n","\n","'''\n","Usage: Same as DCT()\n","\n","input: 64*256 torch array (histogram)\n","output: 64*256 torch array (frequency histogram)\n","'''\n","class DFT(object):\n","    def __init__(self):\n","        pass\n","\n","    def __call__(self, freq):\n","        # convert into complex form containing real and imaginary part\n","        cmplx = torch.from_numpy(np.zeros((freq.shape[0], freq.shape[1], 2)))\n","        cmplx[:, :, 0] += freq\n","        out = torch.fft(cmplx, 1)[:, :, 0]\n","        return out\n","\n","    def __repr__(self):\n","        return \"Simply DFT. What do you expect?\"\n","\n","class Ycbcr_convert():\n","    def __init__(self):\n","        pass\n","\n","    def __call__(self, img):\n","        return img.convert('YCbCr')\n","\n","    def __repr__(self):\n","        return \"Convert a PIL Image from RGB to YCbCr\""],"execution_count":3,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"q1g1Mff92B8t"},"source":["## Trainging/Test Data Loading\n","\n","We use the DCT defined above as one of our preprocess step."]},{"cell_type":"code","metadata":{"id":"Ok0GXa-z2CGQ","executionInfo":{"status":"ok","timestamp":1604415663857,"user_tz":-480,"elapsed":55309,"user":{"displayName":"Chiu Wai Yan","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GjB1UpIXP4S96VOkkVB6pcUqM_SGJWWju3k9tP0Yg=s64","userId":"13481787162485730226"}},"outputId":"6ab6add5-6dcf-4846-c74a-54bf750fbefb","colab":{"base_uri":"https://localhost:8080/"}},"source":["def image_show(np_image):\n","    plt.figure(figsize = (5,5))\n","    plt.imshow(np_image) # it should be a numpy array\n","    plt.show()\n","\n","# mainfolder = 'drive/My Drive/COMP5331 Fall 2020/MM17-WeiboRumorSet/'\n","mainfolder = 'MM17-WeiboRumorSet/'\n","image_height = 128\n","image_width  = 128\n","mytransform = transforms.Compose([\n","    transforms.Resize((image_height,image_width), interpolation=Image.BICUBIC),\n","    Ycbcr_convert(),\n","    DCT(),\n","    # DFT()\n","])\n","\n","dataset = datasets.ImageFolder(mainfolder, transform=mytransform) \n","# NOTE: The path should point to a place with subfolders (which contain images inside).\n","#  It will report bugs if there is no subfolder.\n","# print(type(dataset))\n","print('Total no. of images: ', len(dataset))\n","\n","print(round(len(dataset)*0.5))\n","\n","# 50% train, 50% test\n","trainset, testset = torch.utils.data.random_split(dataset, [round(len(dataset)*0.8), round(len(dataset)*0.2)])\n","print('Total no. of train set images: ', len(trainset))\n","print('Total no. of test set images: ', len(testset))\n","\n","labels = dataset.class_to_idx # the dataset saves the subfolder's name as the labels\n","# print(labels) \n","# print(type(labels))\n","\n","classes = list(labels.keys()) # convert dict keys into list\n","print('classes:', classes)"],"execution_count":4,"outputs":[{"output_type":"stream","text":["Total no. of images:  13249\n","6624\n","Total no. of train set images:  10599\n","Total no. of test set images:  2650\n","classes: ['nonrumor_images', 'rumor_images', 'tweets']\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"GFqY4wet2R50","executionInfo":{"status":"ok","timestamp":1604415663858,"user_tz":-480,"elapsed":55306,"user":{"displayName":"Chiu Wai Yan","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GjB1UpIXP4S96VOkkVB6pcUqM_SGJWWju3k9tP0Yg=s64","userId":"13481787162485730226"}}},"source":["batch_size = 32\n","\n","trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True) \n","testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=True) "],"execution_count":5,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"kpsjwNSWrS9A"},"source":["## Attention\n","\n","At some point I realize attention is not going to work for single vector. \\\\\n","So maybe we can add it back after integration."]},{"cell_type":"markdown","metadata":{"id":"DISMdyjSfC8d"},"source":["## Frequency Domain Subnetwork\n","\n","Basically, this is the model to train"]},{"cell_type":"code","metadata":{"id":"tNiJCvy99sOo","executionInfo":{"status":"ok","timestamp":1604415663858,"user_tz":-480,"elapsed":55303,"user":{"displayName":"Chiu Wai Yan","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GjB1UpIXP4S96VOkkVB6pcUqM_SGJWWju3k9tP0Yg=s64","userId":"13481787162485730226"}}},"source":["class Frequent_Domain_Subnetwork(nn.Module):\n","    def __init__(self):\n","        super(Frequent_Domain_Subnetwork, self).__init__()\n","        self.backbone = nn.Sequential(nn.Conv1d(64, 32, 3, padding=1),\n","                             nn.BatchNorm1d(32),\n","                             nn.ReLU(),\n","                             nn.MaxPool1d(2),\n","                             nn.Conv1d(32, 64, 3, padding=1),\n","                             nn.BatchNorm1d(64),\n","                             nn.ReLU(),\n","                             nn.MaxPool1d(2),\n","                             nn.Conv1d(64, 128, 3, padding=1),\n","                             nn.BatchNorm1d(128),\n","                             nn.ReLU(),\n","                             nn.MaxPool1d(2),\n","                             nn.Flatten(),\n","                             nn.Linear(4096, 64),\n","                             nn.ReLU(),\n","                             nn.Linear(64, 64))\n","\n","    def forward(self, x):\n","        out = self.backbone.forward(x)\n","        return out    "],"execution_count":6,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"E7NtBms2ljuC"},"source":["## Let's Start Training"]},{"cell_type":"code","metadata":{"id":"pPXhpMZKJjvr","executionInfo":{"status":"ok","timestamp":1604415664358,"user_tz":-480,"elapsed":55800,"user":{"displayName":"Chiu Wai Yan","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GjB1UpIXP4S96VOkkVB6pcUqM_SGJWWju3k9tP0Yg=s64","userId":"13481787162485730226"}}},"source":["MAX_EPOCH = 6\n","print_every = 20\n","\n","device = 'cpu'\n","model = Frequent_Domain_Subnetwork().to(device)\n","criterion = nn.CrossEntropyLoss()\n","optimizer = torch.optim.Adam(model.parameters(), lr=0.0002)"],"execution_count":7,"outputs":[]},{"cell_type":"code","metadata":{"id":"w0hBUJHlAlQN","executionInfo":{"status":"ok","timestamp":1604417127693,"user_tz":-480,"elapsed":1519133,"user":{"displayName":"Chiu Wai Yan","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GjB1UpIXP4S96VOkkVB6pcUqM_SGJWWju3k9tP0Yg=s64","userId":"13481787162485730226"}},"outputId":"14967207-1427-4f79-eb25-10fdcb4ae72b","colab":{"base_uri":"https://localhost:8080/"}},"source":["for epoch in range(MAX_EPOCH):\n","    total_loss, total_acc = 0, 0\n","    cnt = 0\n","    for i, data in enumerate(trainloader):\n","        X, y = data[0].float().to(device), data[1].to(device)\n","        optimizer.zero_grad() \n","       \n","        # forward\n","        out = model(X)        \n","        #print(out, y_pred)\n","        loss = criterion(out, y)\n","\n","        # backward\n","        loss.backward()\n","        optimizer.step()\n","\n","        # stats\n","        y_pred = torch.argmax(out, dim=1)\n","        total_acc += (y_pred == y).sum().item() / len(y_pred)\n","        total_loss += loss.item()\n","\n","        cnt += 1\n","        if i % print_every == 0:\n","            avg_loss = total_loss / cnt\n","            avg_acc = total_acc / cnt\n","            total_acc, total_loss = 0, 0\n","            cnt = 0\n","            # print(out.T, '\\n', y_pred.T, '\\n', y.T)\n","            print('[Epoch %d Iter %d] Loss: %5f  Acc: %5f' % (epoch+1, i+1, avg_loss, avg_acc))\n","\n","# save the model somewhere\n","torch.save(model.state_dict(), \"model\")"],"execution_count":8,"outputs":[{"output_type":"stream","text":["[Epoch 1 Iter 1] Loss: 3.917292  Acc: 0.187500\n","[Epoch 1 Iter 21] Loss: 1.272259  Acc: 0.532813\n","[Epoch 1 Iter 41] Loss: 0.701817  Acc: 0.567187\n","[Epoch 1 Iter 61] Loss: 0.658431  Acc: 0.582812\n","[Epoch 1 Iter 81] Loss: 0.658863  Acc: 0.590625\n","[Epoch 1 Iter 101] Loss: 0.656594  Acc: 0.571875\n","[Epoch 1 Iter 121] Loss: 0.650966  Acc: 0.620313\n","[Epoch 1 Iter 141] Loss: 0.677577  Acc: 0.571875\n","[Epoch 1 Iter 161] Loss: 0.636946  Acc: 0.604688\n","[Epoch 1 Iter 181] Loss: 0.607401  Acc: 0.632812\n","[Epoch 1 Iter 201] Loss: 0.653169  Acc: 0.576562\n","[Epoch 1 Iter 221] Loss: 0.613083  Acc: 0.626563\n","[Epoch 1 Iter 241] Loss: 0.605826  Acc: 0.646875\n","[Epoch 1 Iter 261] Loss: 0.638367  Acc: 0.632812\n","[Epoch 1 Iter 281] Loss: 0.628038  Acc: 0.607812\n","[Epoch 1 Iter 301] Loss: 0.611925  Acc: 0.639062\n","[Epoch 1 Iter 321] Loss: 0.633337  Acc: 0.620313\n","[Epoch 2 Iter 1] Loss: 0.586623  Acc: 0.750000\n","[Epoch 2 Iter 21] Loss: 0.583884  Acc: 0.665625\n","[Epoch 2 Iter 41] Loss: 0.582064  Acc: 0.676562\n","[Epoch 2 Iter 61] Loss: 0.572249  Acc: 0.681250\n","[Epoch 2 Iter 81] Loss: 0.592665  Acc: 0.675000\n","[Epoch 2 Iter 101] Loss: 0.586727  Acc: 0.676562\n","[Epoch 2 Iter 121] Loss: 0.574581  Acc: 0.681250\n","[Epoch 2 Iter 141] Loss: 0.577396  Acc: 0.665625\n","[Epoch 2 Iter 161] Loss: 0.574312  Acc: 0.676562\n","[Epoch 2 Iter 181] Loss: 0.550539  Acc: 0.717187\n","[Epoch 2 Iter 201] Loss: 0.582961  Acc: 0.695312\n","[Epoch 2 Iter 221] Loss: 0.596269  Acc: 0.651563\n","[Epoch 2 Iter 241] Loss: 0.575151  Acc: 0.657813\n","[Epoch 2 Iter 261] Loss: 0.569628  Acc: 0.706250\n","[Epoch 2 Iter 281] Loss: 0.564243  Acc: 0.696875\n","[Epoch 2 Iter 301] Loss: 0.557526  Acc: 0.695312\n","[Epoch 2 Iter 321] Loss: 0.563164  Acc: 0.696875\n","[Epoch 3 Iter 1] Loss: 0.494561  Acc: 0.750000\n","[Epoch 3 Iter 21] Loss: 0.526971  Acc: 0.743750\n","[Epoch 3 Iter 41] Loss: 0.467957  Acc: 0.767188\n","[Epoch 3 Iter 61] Loss: 0.514035  Acc: 0.743750\n","[Epoch 3 Iter 81] Loss: 0.467970  Acc: 0.773438\n","[Epoch 3 Iter 101] Loss: 0.498867  Acc: 0.768750\n","[Epoch 3 Iter 121] Loss: 0.489295  Acc: 0.776563\n","[Epoch 3 Iter 141] Loss: 0.506737  Acc: 0.737500\n","[Epoch 3 Iter 161] Loss: 0.517641  Acc: 0.734375\n","[Epoch 3 Iter 181] Loss: 0.511916  Acc: 0.748437\n","[Epoch 3 Iter 201] Loss: 0.477596  Acc: 0.776563\n","[Epoch 3 Iter 221] Loss: 0.543238  Acc: 0.721875\n","[Epoch 3 Iter 241] Loss: 0.475238  Acc: 0.759375\n","[Epoch 3 Iter 261] Loss: 0.495125  Acc: 0.756250\n","[Epoch 3 Iter 281] Loss: 0.499814  Acc: 0.773438\n","[Epoch 3 Iter 301] Loss: 0.490178  Acc: 0.762500\n","[Epoch 3 Iter 321] Loss: 0.470149  Acc: 0.760938\n","[Epoch 4 Iter 1] Loss: 0.484064  Acc: 0.687500\n","[Epoch 4 Iter 21] Loss: 0.412814  Acc: 0.814063\n","[Epoch 4 Iter 41] Loss: 0.420998  Acc: 0.810937\n","[Epoch 4 Iter 61] Loss: 0.424047  Acc: 0.812500\n","[Epoch 4 Iter 81] Loss: 0.391437  Acc: 0.834375\n","[Epoch 4 Iter 101] Loss: 0.409252  Acc: 0.807813\n","[Epoch 4 Iter 121] Loss: 0.389415  Acc: 0.837500\n","[Epoch 4 Iter 141] Loss: 0.457963  Acc: 0.767188\n","[Epoch 4 Iter 161] Loss: 0.442768  Acc: 0.796875\n","[Epoch 4 Iter 181] Loss: 0.409006  Acc: 0.806250\n","[Epoch 4 Iter 201] Loss: 0.428138  Acc: 0.798438\n","[Epoch 4 Iter 221] Loss: 0.466643  Acc: 0.770312\n","[Epoch 4 Iter 241] Loss: 0.387682  Acc: 0.831250\n","[Epoch 4 Iter 261] Loss: 0.442388  Acc: 0.782813\n","[Epoch 4 Iter 281] Loss: 0.385563  Acc: 0.828125\n","[Epoch 4 Iter 301] Loss: 0.412142  Acc: 0.815625\n","[Epoch 4 Iter 321] Loss: 0.391735  Acc: 0.807813\n","[Epoch 5 Iter 1] Loss: 0.266551  Acc: 0.968750\n","[Epoch 5 Iter 21] Loss: 0.331330  Acc: 0.876563\n","[Epoch 5 Iter 41] Loss: 0.333724  Acc: 0.868750\n","[Epoch 5 Iter 61] Loss: 0.356656  Acc: 0.843750\n","[Epoch 5 Iter 81] Loss: 0.310221  Acc: 0.885938\n","[Epoch 5 Iter 101] Loss: 0.327432  Acc: 0.868750\n","[Epoch 5 Iter 121] Loss: 0.345940  Acc: 0.862500\n","[Epoch 5 Iter 141] Loss: 0.298263  Acc: 0.892188\n","[Epoch 5 Iter 161] Loss: 0.386378  Acc: 0.820312\n","[Epoch 5 Iter 181] Loss: 0.330550  Acc: 0.846875\n","[Epoch 5 Iter 201] Loss: 0.332959  Acc: 0.875000\n","[Epoch 5 Iter 221] Loss: 0.337098  Acc: 0.860938\n","[Epoch 5 Iter 241] Loss: 0.373627  Acc: 0.829688\n","[Epoch 5 Iter 261] Loss: 0.317457  Acc: 0.854688\n","[Epoch 5 Iter 281] Loss: 0.333484  Acc: 0.851562\n","[Epoch 5 Iter 301] Loss: 0.316607  Acc: 0.859375\n","[Epoch 5 Iter 321] Loss: 0.334692  Acc: 0.853125\n","[Epoch 6 Iter 1] Loss: 0.246384  Acc: 0.937500\n","[Epoch 6 Iter 21] Loss: 0.249373  Acc: 0.920312\n","[Epoch 6 Iter 41] Loss: 0.266754  Acc: 0.906250\n","[Epoch 6 Iter 61] Loss: 0.217313  Acc: 0.929688\n","[Epoch 6 Iter 81] Loss: 0.230985  Acc: 0.920312\n","[Epoch 6 Iter 101] Loss: 0.230983  Acc: 0.928125\n","[Epoch 6 Iter 121] Loss: 0.277997  Acc: 0.885938\n","[Epoch 6 Iter 141] Loss: 0.278836  Acc: 0.885938\n","[Epoch 6 Iter 161] Loss: 0.268085  Acc: 0.889062\n","[Epoch 6 Iter 181] Loss: 0.324757  Acc: 0.859375\n","[Epoch 6 Iter 201] Loss: 0.282118  Acc: 0.895312\n","[Epoch 6 Iter 221] Loss: 0.282639  Acc: 0.884375\n","[Epoch 6 Iter 241] Loss: 0.255344  Acc: 0.898438\n","[Epoch 6 Iter 261] Loss: 0.254631  Acc: 0.895312\n","[Epoch 6 Iter 281] Loss: 0.310192  Acc: 0.875000\n","[Epoch 6 Iter 301] Loss: 0.280160  Acc: 0.890625\n","[Epoch 6 Iter 321] Loss: 0.305266  Acc: 0.868750\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"GS-ZXZNEpSbj"},"source":["## Test\n","\n","run the following block if you want the model to be loaded instead of trained."]},{"cell_type":"code","metadata":{"id":"kly-O_-OpWoX","executionInfo":{"status":"ok","timestamp":1604417127697,"user_tz":-480,"elapsed":1519134,"user":{"displayName":"Chiu Wai Yan","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GjB1UpIXP4S96VOkkVB6pcUqM_SGJWWju3k9tP0Yg=s64","userId":"13481787162485730226"}},"outputId":"bd332dbd-5989-475c-9007-2cb20d19a037","colab":{"base_uri":"https://localhost:8080/"}},"source":["model.load_state_dict(torch.load(\"model\"))"],"execution_count":9,"outputs":[{"output_type":"execute_result","data":{"text/plain":["<All keys matched successfully>"]},"metadata":{"tags":[]},"execution_count":9}]},{"cell_type":"code","metadata":{"id":"5I6qKsebOPE5","executionInfo":{"status":"ok","timestamp":1604417127697,"user_tz":-480,"elapsed":1519130,"user":{"displayName":"Chiu Wai Yan","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GjB1UpIXP4S96VOkkVB6pcUqM_SGJWWju3k9tP0Yg=s64","userId":"13481787162485730226"}}},"source":["from sklearn.metrics import accuracy_score # normal accuracy\n","from sklearn.metrics import balanced_accuracy_score # used in case of imbalanced data sets, average of recall, from 0 to 1\n","from sklearn.metrics import confusion_matrix # division of performance on the multilabels\n","from sklearn.metrics import cohen_kappa_score # compares model against random prediction, from -1 to 1\n","from sklearn.metrics import classification_report # for multilabel classification, gives precision, recall, f score, support, more\n","target_names = ['class 0', 'class 1']\n","\n","def print_metrics(y_true, y_pred):\n","    print(\"Accuracy:\", accuracy_score(y_true, y_pred))\n","    print(\"Balanced Accuracy:\" , balanced_accuracy_score(y_true, y_pred))\n","    print(\"Confusion Matrix:\\n\", confusion_matrix(y_true, y_pred))\n","    print(\"Cohen Kappa Score:\", cohen_kappa_score(y_true, y_pred))\n","    print(\"Classification Report:\\n\", classification_report(y_true, y_pred, target_names=target_names))"],"execution_count":10,"outputs":[]},{"cell_type":"code","metadata":{"id":"NyskqxlVrAqQ","executionInfo":{"status":"ok","timestamp":1604417186531,"user_tz":-480,"elapsed":1577962,"user":{"displayName":"Chiu Wai Yan","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GjB1UpIXP4S96VOkkVB6pcUqM_SGJWWju3k9tP0Yg=s64","userId":"13481787162485730226"}},"outputId":"c819790c-76db-4448-986a-c5e36cdebbf1","colab":{"base_uri":"https://localhost:8080/"}},"source":["report_every = 10\n","acc, loss = 0, 0\n","cnt = 0\n","model.eval()\n","y_true = []\n","y_pred = []\n","with torch.no_grad():\n","    for data in testloader:\n","        X, y = data[0].float().to(device), data[1].to(device)    \n","        # prediction\n","        out = model(X)        \n","        pred = torch.argmax(out, dim=1)\n","\n","        y_true.append(y)\n","        y_pred.append(pred)\n","\n","        cnt += 1\n","\n","        if cnt % report_every == 0:\n","            print(\"[Test] %d / %d batches tested\" % (cnt, testloader.__len__()))        \n","\n","    print(\"[Test] %d / %d batches tested\" % (cnt, testloader.__len__()))\n","    y_true = torch.cat(y_true, dim=0)\n","    y_pred = torch.cat(y_pred, dim=0)\n","    print_metrics(y_true, y_pred)"],"execution_count":11,"outputs":[{"output_type":"stream","text":["[Test] 10 / 83 batches tested\n","[Test] 20 / 83 batches tested\n","[Test] 30 / 83 batches tested\n","[Test] 40 / 83 batches tested\n","[Test] 50 / 83 batches tested\n","[Test] 60 / 83 batches tested\n","[Test] 70 / 83 batches tested\n","[Test] 80 / 83 batches tested\n","[Test] 83 / 83 batches tested\n","Accuracy: 0.74\n","Balanced Accuracy: 0.7224616486037124\n","Confusion Matrix:\n"," [[ 679  407]\n"," [ 282 1282]]\n","Cohen Kappa Score: 0.45288916829325143\n","Classification Report:\n","               precision    recall  f1-score   support\n","\n","     class 0       0.71      0.63      0.66      1086\n","     class 1       0.76      0.82      0.79      1564\n","\n","    accuracy                           0.74      2650\n","   macro avg       0.73      0.72      0.73      2650\n","weighted avg       0.74      0.74      0.74      2650\n","\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"DasnbExSKrDR","executionInfo":{"status":"ok","timestamp":1604417186532,"user_tz":-480,"elapsed":1577960,"user":{"displayName":"Chiu Wai Yan","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GjB1UpIXP4S96VOkkVB6pcUqM_SGJWWju3k9tP0Yg=s64","userId":"13481787162485730226"}}},"source":[""],"execution_count":11,"outputs":[]}]}