{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 -*-#\n",
    "from datetime import date, datetime\n",
    "import os\n",
    "import random\n",
    "import math\n",
    "\n",
    "from adabelief_pytorch import AdaBelief\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from ray import tune\n",
    "from ray.tune.schedulers import AsyncHyperBandScheduler\n",
    "from ray.tune.suggest import ConcurrencyLimiter\n",
    "from ray.tune.suggest import Repeater\n",
    "from ray.tune.suggest.bayesopt import BayesOptSearch\n",
    "from ray.tune import Callback\n",
    "from ray.tune import JupyterNotebookReporter\n",
    "\n",
    "from scipy.fftpack import fft,dct\n",
    "import seaborn as sns\n",
    "from sklearn.metrics import classification_report\n",
    "import torch\n",
    "from torch.autograd import Variable\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "#from torch.cuda.amp import autocast, GradScaler\n",
    "from torch.optim import *\n",
    "from torch.utils.data import  RandomSampler, SequentialSampler, Dataset, DataLoader\n",
    "from torchvision import transforms\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "import torchvision\n",
    "from tqdm.notebook import tqdm, trange\n",
    "from transformers import BertTokenizer, BertConfig, BertModel, get_linear_schedule_with_warmup\n",
    "from tabulate import tabulate\n",
    "\n",
    "from IPython.display import display, HTML\n",
    "pd.options.display.max_columns = None\n",
    "\n",
    "# Detect the GPU workstation by a marker path; on that machine pin\n",
    "# training to the second GPU.\n",
    "environment = os.path.exists(\"/home/**/workspace\")\n",
    "\n",
    "if environment:\n",
    "    os.environ['CUDA_VISIBLE_DEVICES'] = '1'\n",
    "\n",
    "# Seed every RNG the pipeline touches so runs are reproducible.\n",
    "random.seed(42)\n",
    "os.environ['PYTHONHASHSEED'] = str(42)\n",
    "np.random.seed(42)\n",
    "torch.manual_seed(42)           # BUGFIX: the torch RNG was never seeded\n",
    "torch.cuda.manual_seed_all(42)  # covers all visible GPUs\n",
    "torch.backends.cudnn.deterministic = True\n",
    "# BUGFIX: benchmark=True lets cuDNN auto-select (possibly non-deterministic)\n",
    "# kernels, defeating the deterministic flag above; must be False for\n",
    "# reproducible runs.\n",
    "torch.backends.cudnn.benchmark = False\n",
    "\n",
    "reporter = JupyterNotebookReporter(overwrite=False, max_progress_rows=1000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_dct_img(img):\n",
    "    \"\"\"Convert a grayscale image tensor into a block-wise frequency representation.\n",
    "\n",
    "    The image is tiled into an 8x8 grid of patches; each patch is flattened,\n",
    "    passed through a DCT and then the real part of an FFT, and the result is\n",
    "    interpolated down to a fixed width of 250.\n",
    "\n",
    "    Args:\n",
    "        img: torch tensor, assumed shape [1, H, W] (e.g. [1, 224, 224]) with\n",
    "             H == W divisible by 8 -- TODO confirm with the transform pipeline.\n",
    "\n",
    "    Returns:\n",
    "        torch.FloatTensor of shape [64, 250] (one row per patch).\n",
    "    \"\"\"\n",
    "    img = img.numpy() #size = [1, 224, 224]\n",
    "    height = img.shape[1]\n",
    "    width = img.shape[2]\n",
    "    #print('height:{}'.format(height))\n",
    "    N = 8 \n",
    "    step = int(height/N) #28\n",
    "\n",
    "    dct_img = np.zeros((1, N*N, step*step, 1), dtype=np.float32) #[1,64,784,1]\n",
    "    fft_img = np.zeros((1, N*N, step*step, 1))\n",
    "    #print('dct_img:{}'.format(dct_img.shape))\n",
    "    \n",
    "    # Walk the image in step-sized patches, row-major; slot i holds patch i.\n",
    "    i = 0\n",
    "    for row in np.arange(0, height, step):\n",
    "        for col in np.arange(0, width, step):\n",
    "            block = np.array(img[:, row:(row+step), col:(col+step)], dtype=np.float32)\n",
    "            #print('block:{}'.format(block.shape))\n",
    "            block1 = block.reshape(-1, step*step, 1) #[batch_size,784,1]\n",
    "            # NOTE(review): scipy.fftpack.dct transforms the last axis, which\n",
    "            # has size 1 here -- effectively only a scaling, not a per-patch\n",
    "            # DCT. Confirm whether axis=1 (the step*step axis) was intended.\n",
    "            dct_img[:, i,:,:] = dct(block1) #[batch_size, 64, 784, 1]\n",
    "\n",
    "            i += 1\n",
    "\n",
    "    #for i in range(64):\n",
    "    # NOTE(review): same caveat -- fft also runs over the trailing size-1 axis.\n",
    "    fft_img[:,:,:,:] = fft(dct_img[:,:,:,:]).real #[batch_size,64, 784,1]\n",
    "    \n",
    "    fft_img = torch.from_numpy(fft_img).float() #[batch_size, 64, 784, 1]\n",
    "    new_img = F.interpolate(fft_img, size=[250,1]) #[batch_size, 64, 250, 1]\n",
    "    new_img = new_img.squeeze(0).squeeze(-1) #torch.size = [64, 250]\n",
    "    \n",
    "    return new_img   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MyDataset(Dataset):\n",
    "    \"\"\"Multimodal dataset pairing post text with its image.\n",
    "\n",
    "    Each item yields BERT-tokenized text tensors, a VGG-ready image tensor,\n",
    "    a DCT frequency representation of the grayscale image, the post id and\n",
    "    the label.\n",
    "    \"\"\"\n",
    "    def __init__(self, data, VOCAB, max_sen_len, transform_vgg=None, transform_dct=None):\n",
    "        # data: mapping with keys 'post_id', 'post_content', 'image', 'label'\n",
    "        #       (numpy arrays for ids/labels, PIL images for 'image') --\n",
    "        #       presumably produced by the data_process_* modules; verify.\n",
    "        # VOCAB: name/path passed to BertTokenizer.from_pretrained.\n",
    "        # max_sen_len: fixed token length for padding/truncation.\n",
    "        super(MyDataset, self).__init__()\n",
    "        \n",
    "        self.transform_vgg = transform_vgg\n",
    "        self.transform_dct = transform_dct\n",
    "        self.tokenizer = BertTokenizer.from_pretrained(VOCAB)\n",
    "        self.max_sen_len = max_sen_len\n",
    "        \n",
    "        self.post_id = torch.from_numpy(data['post_id'])\n",
    "        self.tweet_content = data['post_content']\n",
    "        #self.image = list(self.transform(data['image']))\n",
    "        self.image = list(data['image'])\n",
    "        self.label = torch.from_numpy(data['label']) #type:int\n",
    "        \n",
    "    def __getitem__(self, idx):\n",
    "        \n",
    "        content = str(self.tweet_content[idx])\n",
    "        # Fixed-length padding/truncation so samples stack into batches\n",
    "        # without a custom collate_fn.\n",
    "        text_content = self.tokenizer.encode_plus(content, add_special_tokens = True, padding = 'max_length', truncation = True, max_length = self.max_sen_len, return_tensors = 'pt')\n",
    "        \n",
    "        # The DCT branch works on the grayscale ('L') version of the image.\n",
    "        dct_img = self.transform_dct(self.image[idx].convert('L'))\n",
    "        dct_img = process_dct_img(dct_img)\n",
    "\n",
    "        return {\n",
    "            \"text_input_ids\": text_content[\"input_ids\"].flatten().clone().detach().type(torch.LongTensor),\n",
    "            \"attention_mask\": text_content[\"attention_mask\"].flatten().clone().detach().type(torch.LongTensor),\n",
    "            \"token_type_ids\": text_content[\"token_type_ids\"].flatten().clone().detach().type(torch.LongTensor),\n",
    "            \"image\": self.transform_vgg(self.image[idx]),\n",
    "            \"dct_img\": dct_img,\n",
    "            \"post_id\": self.post_id[idx],\n",
    "            \"label\": self.label[idx],\n",
    "        }\n",
    "    def __len__(self):\n",
    "        return len(self.label)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class vgg(nn.Module):\n",
    "    \"\"\"\n",
    "    obtain visual feature\n",
    "\n",
    "    Wraps a VGG-19 loaded from a local .pth file: keeps the convolutional\n",
    "    features and the classifier up to (excluding) its last three layers, so\n",
    "    forward() returns the penultimate fully-connected activation per image.\n",
    "    \"\"\"\n",
    "    def __init__(self, model_dim, pthfile):\n",
    "        # model_dim is stored but not used inside this module.\n",
    "        super(vgg, self).__init__()\n",
    "        self.model_dim = model_dim\n",
    "        self.pthfile = pthfile\n",
    "        \n",
    "        #image\n",
    "        vgg_19 = torchvision.models.vgg19(pretrained=False)\n",
    "        vgg_19.load_state_dict(torch.load(self.pthfile))\n",
    "\n",
    "        self.feature = vgg_19.features\n",
    "        self.classifier = nn.Sequential(*list(vgg_19.classifier.children())[:-3])\n",
    "        # NOTE(review): the key-filtering below appears to be a no-op --\n",
    "        # vgg_19.state_dict() keys are prefixed ('classifier.0.weight') while\n",
    "        # self.classifier keys are not ('0.weight'), so nothing matches. The\n",
    "        # classifier already carries the loaded weights via its child modules.\n",
    "        pretrained_dict = vgg_19.state_dict()\n",
    "        model_dict = self.classifier.state_dict()\n",
    "        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} #delect the last layer\n",
    "        model_dict.update(pretrained_dict) #update \n",
    "        self.classifier.load_state_dict(model_dict) #load the new parameter\n",
    "        \n",
    "    def forward(self, img):\n",
    "        #image\n",
    "        #image = self.vgg(img) #[batch, num_ftrs]\n",
    "        img = self.feature(img)\n",
    "        # Flatten conv features before the fully-connected classifier.\n",
    "        img = img.view(img.size(0), -1)\n",
    "        image = self.classifier(img)\n",
    "        \n",
    "        return image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class multimodal_attention(nn.Module):\n",
    "    \"\"\"\n",
    "    dot-product attention mechanism\n",
    "\n",
    "    Computes softmax(q @ k^T [* scale]) @ v with dropout on the attention\n",
    "    weights.\n",
    "    \"\"\"\n",
    "    def __init__(self, attention_dropout=0.5):\n",
    "        super(multimodal_attention, self).__init__()\n",
    "        self.dropout = nn.Dropout(attention_dropout)\n",
    "        # NOTE(review): softmax over dim=2 normalizes the query (row) axis of\n",
    "        # the [.., q, k] score matrix; standard attention uses dim=-1.\n",
    "        # Confirm this is intentional.\n",
    "        self.softmax = nn.Softmax(dim=2)\n",
    "\n",
    "    def forward(self, q, k, v, scale=None, attn_mask=None):\n",
    "       \n",
    "        attention = torch.matmul(q, k.transpose(-2, -1))\n",
    "        #print('attention.shape:{}'.format(attention.shape))\n",
    "        # NOTE(review): `if scale:` silently skips scaling when scale == 0.\n",
    "        if scale:\n",
    "            attention = attention * scale\n",
    "\n",
    "        # NOTE(review): truthiness of a multi-element tensor raises a\n",
    "        # RuntimeError -- this branch only works for None or a 1-element\n",
    "        # mask; confirm what callers actually pass.\n",
    "        if attn_mask:\n",
    "            attention = attention.masked_fill_(attn_mask, -np.inf)\n",
    "        attention = self.softmax(attention)\n",
    "        #print('attention.shftmax:{}'.format(attention))\n",
    "        attention = self.dropout(attention)\n",
    "        attention = torch.matmul(attention, v)\n",
    "        #print('attn_final.shape:{}'.format(attention.shape))\n",
    "\n",
    "        return attention\n",
    "\n",
    "\n",
    "class MultiHeadAttention(nn.Module):\n",
    "    \"\"\"Multi-head attention over feature vectors.\n",
    "\n",
    "    Unusual design: a [batch, model_dim] input is unsqueezed to\n",
    "    [batch, model_dim, 1] and each scalar feature is projected to\n",
    "    dim_per_head * num_heads, so attention runs across the model_dim\n",
    "    positions of a single vector rather than across a token sequence.\n",
    "    Output keeps the caller-visible shape [batch, model_dim] thanks to the\n",
    "    final projection, residual add, and LayerNorm.\n",
    "    \"\"\"\n",
    "    def __init__(self, model_dim=256, num_heads=8, dropout=0.5):\n",
    "        super(MultiHeadAttention, self).__init__()\n",
    "        \n",
    "        self.model_dim = model_dim\n",
    "        self.dim_per_head = model_dim // num_heads\n",
    "        self.num_heads = num_heads\n",
    "        # Projections map each scalar (in_features=1) to all heads at once.\n",
    "        self.linear_k = nn.Linear(1, self.dim_per_head * num_heads, bias=False)\n",
    "        self.linear_v = nn.Linear(1, self.dim_per_head * num_heads, bias=False)\n",
    "        self.linear_q = nn.Linear(1, self.dim_per_head * num_heads, bias=False)\n",
    "\n",
    "        self.dot_product_attention = multimodal_attention(dropout)\n",
    "        self.linear_final = nn.Linear(model_dim, 1, bias=False)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.layer_norm = nn.LayerNorm(model_dim)\n",
    "\n",
    "    def forward(self, query, key, value, attn_mask=None):\n",
    "        # Keep the raw query for the residual connection.\n",
    "        residual = query\n",
    "        query = query.unsqueeze(-1)\n",
    "        key = key.unsqueeze(-1)\n",
    "        value = value.unsqueeze(-1)\n",
    "        #print(\"query.shape:{}\".format(query.shape))\n",
    "\n",
    "        dim_per_head = self.dim_per_head\n",
    "        num_heads = self.num_heads\n",
    "        #batch_size = key.size(0)\n",
    "\n",
    "        # linear projection\n",
    "        key = self.linear_k(key)\n",
    "        value = self.linear_v(value)\n",
    "        query = self.linear_q(query)\n",
    "        #print('key.shape:{}'.format(key.shape))\n",
    "\n",
    "        # split by heads -> [batch, heads, model_dim, dim_per_head]\n",
    "        key = key.view(-1, num_heads, self.model_dim, dim_per_head)\n",
    "        value = value.view(-1, num_heads, self.model_dim, dim_per_head)\n",
    "        query = query.view(-1, num_heads, self.model_dim, dim_per_head)\n",
    "\n",
    "        # scaled dot product attention\n",
    "        # NOTE(review): the integer-division form (dim_per_head // num_heads)\n",
    "        # differs from the conventional dim_per_head**-0.5 and raises\n",
    "        # ZeroDivisionError when dim_per_head < num_heads. Confirm intent.\n",
    "        scale = (key.size(-1) // num_heads)**-0.5\n",
    "        attention = self.dot_product_attention(query, key, value, \n",
    "                                               scale, attn_mask)\n",
    "\n",
    "        # Re-merge heads: [batch, model_dim, dim_per_head * num_heads]\n",
    "        attention = attention.view(-1, self.model_dim, dim_per_head * num_heads)\n",
    "        #print('attention_con_shape:{}'.format(attention.shape))\n",
    "\n",
    "        # final linear projection\n",
    "        output = self.linear_final(attention).squeeze(-1)\n",
    "        #print('output.shape:{}'.format(output.shape))\n",
    "        # dropout\n",
    "        output = self.dropout(output)\n",
    "        # add residual and norm layer\n",
    "        output = self.layer_norm(residual + output)\n",
    "\n",
    "        return output\n",
    "\n",
    "\n",
    "class PositionalWiseFeedForward(nn.Module):\n",
    "    \"\"\"Position-wise feed-forward block: Linear -> ReLU -> Linear with\n",
    "    dropout, a residual connection, and LayerNorm on the output.\n",
    "    \"\"\"\n",
    "    def __init__(self, model_dim=256, ffn_dim=2048, dropout=0.5):\n",
    "        super(PositionalWiseFeedForward, self).__init__()\n",
    "        # Expansion and projection layers of the FFN.\n",
    "        self.w1 = nn.Linear(model_dim, ffn_dim)\n",
    "        self.w2 = nn.Linear(ffn_dim, model_dim)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.layer_norm = nn.LayerNorm(model_dim)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # FFN output with dropout, then residual add and normalization.\n",
    "        projected = self.dropout(self.w2(F.relu(self.w1(x))))\n",
    "        return self.layer_norm(projected + x)\n",
    "\n",
    "\n",
    "class multimodal_fusion_layer(nn.Module):\n",
    "    \"\"\"Cross-modal fusion layer: each modality attends to the other, each\n",
    "    result passes through its own feed-forward block, and the two streams\n",
    "    are concatenated and linearly merged back to model_dim.\n",
    "    \"\"\"\n",
    "    def __init__(self, model_dim=256, num_heads=8, ffn_dim=2048, dropout=0.5):\n",
    "        super(multimodal_fusion_layer, self).__init__()\n",
    "        self.attention_1 = MultiHeadAttention(model_dim, num_heads, dropout)\n",
    "        self.attention_2 = MultiHeadAttention(model_dim, num_heads, dropout)\n",
    "        self.feed_forward_1 = PositionalWiseFeedForward(model_dim, ffn_dim, dropout)\n",
    "        self.feed_forward_2 = PositionalWiseFeedForward(model_dim, ffn_dim, dropout)\n",
    "        self.fusion_linear = nn.Linear(model_dim*2, model_dim)\n",
    "\n",
    "    def forward(self, image_output, text_output, attn_mask=None):\n",
    "        # Bidirectional cross-attention between the two modalities.\n",
    "        img_stream = self.attention_1(image_output, text_output, text_output, attn_mask)\n",
    "        txt_stream = self.attention_2(text_output, image_output, image_output, attn_mask)\n",
    "\n",
    "        img_stream = self.feed_forward_1(img_stream)\n",
    "        txt_stream = self.feed_forward_2(txt_stream)\n",
    "\n",
    "        # Concatenate along features and project back to model_dim.\n",
    "        return self.fusion_linear(torch.cat([img_stream, txt_stream], dim=1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def ConvBNRelu(in_channels, out_channels, kernel_size, stride=1, padding=0):\n",
    "    \"\"\"Conv1d -> BatchNorm1d -> ReLU building block.\"\"\"\n",
    "    conv = nn.Conv1d(in_channels, out_channels, kernel_size,\n",
    "                     stride=stride, padding=padding)\n",
    "    return nn.Sequential(conv, nn.BatchNorm1d(out_channels), nn.ReLU(inplace=True))\n",
    "\n",
    "\n",
    "def ConvBNRelu2d(in_channels, out_channels, kernel_size, stride=1, padding=0):\n",
    "    \"\"\"Conv2d with a (1, kernel_size) kernel -> BatchNorm2d -> ReLU block.\"\"\"\n",
    "    conv = nn.Conv2d(in_channels, out_channels, (1, kernel_size),\n",
    "                     stride=stride, padding=padding)\n",
    "    return nn.Sequential(conv, nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))\n",
    "\n",
    "class DctStem(nn.Module):\n",
    "    \"\"\"Stem of the DCT branch: three ConvBNRelu2d stages followed by a\n",
    "    (1, 2) max-pool; output is permuted to [batch, blocks, channels, width].\n",
    "    \"\"\"\n",
    "    def __init__(self, kernel_sizes, num_channels):\n",
    "        super(DctStem, self).__init__()\n",
    "        # Channel pipeline: 1 -> num_channels[0] -> [1] -> [2].\n",
    "        widths = [1] + list(num_channels)\n",
    "        stages = [\n",
    "            ConvBNRelu2d(in_channels=widths[i],\n",
    "                         out_channels=widths[i + 1],\n",
    "                         kernel_size=kernel_sizes[i])\n",
    "            for i in range(3)\n",
    "        ]\n",
    "        stages.append(nn.MaxPool2d((1, 2)))\n",
    "        self.convs = nn.Sequential(*stages)\n",
    "\n",
    "    def forward(self, dct_img):\n",
    "        # Add a channel axis, run the conv stack, then swap to blocks-first.\n",
    "        features = self.convs(dct_img.unsqueeze(1))\n",
    "        return features.permute(0, 2, 1, 3)\n",
    "\n",
    "class DctInceptionBlock(nn.Module):\n",
    "    \"\"\"Inception-style block over DCT features.\n",
    "\n",
    "    Four parallel branches (1x1 conv; 1x1 -> 1x3; 1x1 -> 1x3 -> 1x3;\n",
    "    maxpool -> 1x1) are concatenated on the channel axis. All convs use\n",
    "    (1, k) kernels via ConvBNRelu2d; padding (0, 1) preserves width for the\n",
    "    1x3 convs. Input/output layout is [batch, blocks, channels, width],\n",
    "    permuted internally to channels-second for the convolutions.\n",
    "    \"\"\"\n",
    "    def __init__(\n",
    "        self,\n",
    "        in_channel=128,\n",
    "        branch1_channels=[64],\n",
    "        branch2_channels=[48, 64],\n",
    "        branch3_channels=[64, 96, 96],\n",
    "        branch4_channels=[32],\n",
    "    ):\n",
    "        super(DctInceptionBlock, self).__init__()\n",
    "\n",
    "        self.branch1 = ConvBNRelu2d(in_channels=in_channel,\n",
    "                                    out_channels=branch1_channels[0],\n",
    "                                    kernel_size=1)\n",
    "\n",
    "        self.branch2 = nn.Sequential(\n",
    "            ConvBNRelu2d(in_channels=in_channel,\n",
    "                         out_channels=branch2_channels[0],\n",
    "                         kernel_size=1),\n",
    "            ConvBNRelu2d(\n",
    "                in_channels=branch2_channels[0],\n",
    "                out_channels=branch2_channels[1],\n",
    "                kernel_size=3,\n",
    "                padding=(0, 1),\n",
    "            ),\n",
    "        )\n",
    "\n",
    "        self.branch3 = nn.Sequential(\n",
    "            ConvBNRelu2d(in_channels=in_channel,\n",
    "                         out_channels=branch3_channels[0],\n",
    "                         kernel_size=1),\n",
    "            ConvBNRelu2d(\n",
    "                in_channels=branch3_channels[0],\n",
    "                out_channels=branch3_channels[1],\n",
    "                kernel_size=3,\n",
    "                padding=(0, 1),\n",
    "            ),\n",
    "            ConvBNRelu2d(\n",
    "                in_channels=branch3_channels[1],\n",
    "                out_channels=branch3_channels[2],\n",
    "                kernel_size=3,\n",
    "                padding=(0, 1),\n",
    "            ),\n",
    "        )\n",
    "\n",
    "        self.branch4 = nn.Sequential(\n",
    "            nn.MaxPool2d(kernel_size=(1, 3), stride=1, padding=(0, 1)),\n",
    "            ConvBNRelu2d(in_channels=in_channel,\n",
    "                         out_channels=branch4_channels[0],\n",
    "                         kernel_size=1),\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "\n",
    "        # to [batch, channels, blocks, width] for Conv2d\n",
    "        x = x.permute(0, 2, 1, 3)\n",
    "        # y = x\n",
    "        out1 = self.branch1(x)\n",
    "        out2 = self.branch2(x)\n",
    "        out3 = self.branch3(x)\n",
    "        out4 = self.branch4(x)\n",
    "        out = torch.cat([out1, out2, out3, out4], dim=1)\n",
    "        # back to [batch, blocks, channels, width]\n",
    "        out = out.permute(0, 2, 1, 3)\n",
    "\n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DctCNN(nn.Module):\n",
    "    \"\"\"Frequency-domain (DCT) image branch.\n",
    "\n",
    "    Pipeline: DctStem convs -> DctInceptionBlock -> (1, 122) max-pool ->\n",
    "    1x1 conv to out_channels, flattened to a 4096-d vector per image.\n",
    "    \"\"\"\n",
    "    def __init__(self,\n",
    "                 model_dim,\n",
    "                 dropout,\n",
    "                 kernel_sizes,\n",
    "                 num_channels,\n",
    "                 in_channel=128,\n",
    "                 branch1_channels=[64],\n",
    "                 branch2_channels=[48, 64],\n",
    "                 branch3_channels=[64, 96, 96],\n",
    "                 branch4_channels=[32],\n",
    "                 out_channels=64):\n",
    "\n",
    "        super(DctCNN, self).__init__()\n",
    "\n",
    "        self.stem = DctStem(kernel_sizes, num_channels)\n",
    "\n",
    "        self.InceptionBlock = DctInceptionBlock(\n",
    "            in_channel,\n",
    "            branch1_channels,\n",
    "            branch2_channels,\n",
    "            branch3_channels,\n",
    "            branch4_channels,\n",
    "        )\n",
    "\n",
    "        # NOTE(review): pool width 122 is hard-coded to match the width the\n",
    "        # stem produces for the expected DCT input -- confirm if input size\n",
    "        # or stem kernels ever change.\n",
    "        self.maxPool = nn.MaxPool2d((1, 122))\n",
    "\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "\n",
    "        # 1x1 conv compressing the concatenated branch channels.\n",
    "        self.conv = ConvBNRelu2d(branch1_channels[-1] + branch2_channels[-1] +\n",
    "                               branch3_channels[-1] + branch4_channels[-1],\n",
    "                               out_channels,\n",
    "                               kernel_size=1)\n",
    "\n",
    "    def forward(self, dct_img):\n",
    "        dct_f = self.stem(dct_img)\n",
    "        x = self.InceptionBlock(dct_f)\n",
    "        x = self.maxPool(x)\n",
    "        x = x.permute(0, 2, 1, 3)\n",
    "        x = self.conv(x)\n",
    "        x = x.permute(0, 2, 1, 3)\n",
    "        x = x.squeeze(-1)\n",
    "        \n",
    "        # Flatten to [batch, 4096]; presumably 64 blocks x 64 out_channels --\n",
    "        # TODO confirm against DctStem sizing and linear_dct in NetShareFusion.\n",
    "        x = x.reshape(-1,4096)\n",
    "\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class NetShareFusion(nn.Module):\n",
    "    \"\"\"Full multimodal classification model.\n",
    "\n",
    "    Fuses three feature streams -- BERT text, VGG-19 image, and DCT\n",
    "    frequency-domain image -- through shared multimodal fusion layers, then\n",
    "    classifies into num_labels classes.\n",
    "    \"\"\"\n",
    "    def __init__(self,\n",
    "                 CASED,\n",
    "                 pthfile,\n",
    "                 kernel_sizes,\n",
    "                 num_channels,\n",
    "                 model_dim,\n",
    "                 drop_and_BN,\n",
    "                 bert_dim=768,\n",
    "                 img_size=250,\n",
    "                 num_labels=2,\n",
    "                 num_layers=1,\n",
    "                 num_heads=8,\n",
    "                 ffn_dim=2048,\n",
    "                 dropout=0.5):\n",
    "        # CASED: BERT model name/path; pthfile: local VGG-19 weights;\n",
    "        # drop_and_BN: one of 'drop-BN', 'BN-drop', 'drop-only', 'BN-only',\n",
    "        # 'none' (see drop_BN_layer). img_size is currently unused here.\n",
    "\n",
    "        super(NetShareFusion, self).__init__()\n",
    "\n",
    "        self.CASED = CASED\n",
    "        self.model_dim = model_dim\n",
    "        self.pthfile = pthfile\n",
    "        self.drop_and_BN = drop_and_BN\n",
    "\n",
    "        #text\n",
    "        self.config = BertConfig.from_pretrained(self.CASED)\n",
    "\n",
    "        self.bert = BertModel.from_pretrained(self.CASED, config=self.config)\n",
    "        self.linear_text = nn.Linear(bert_dim, model_dim)\n",
    "        self.bn_text = nn.BatchNorm1d(model_dim)\n",
    "\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "\n",
    "        #image\n",
    "        self.vgg = vgg(model_dim, pthfile)\n",
    "        self.linear_image = nn.Linear(4096, model_dim)\n",
    "        self.bn_vgg = nn.BatchNorm1d(model_dim)\n",
    "\n",
    "        #dct_image\n",
    "        self.dct_img = DctCNN(model_dim,\n",
    "                              dropout,\n",
    "                              kernel_sizes,\n",
    "                              num_channels,\n",
    "                              in_channel=128,\n",
    "                              branch1_channels=[64],\n",
    "                              branch2_channels=[48, 64],\n",
    "                              branch3_channels=[64, 96, 96],\n",
    "                              branch4_channels=[32],\n",
    "                              out_channels=64)\n",
    "        self.linear_dct = nn.Linear(4096, model_dim)\n",
    "        self.bn_dct = nn.BatchNorm1d(model_dim)\n",
    "\n",
    "        #multimodal fusion\n",
    "        self.fusion_layers = nn.ModuleList([\n",
    "            multimodal_fusion_layer(model_dim, num_heads, ffn_dim, dropout)\n",
    "            for _ in range(num_layers)\n",
    "        ])\n",
    "\n",
    "        #classifier\n",
    "        self.linear1 = nn.Linear(model_dim, 35)\n",
    "        self.bn_1 = nn.BatchNorm1d(35)\n",
    "        self.linear2 = nn.Linear(35, num_labels)\n",
    "        self.softmax = nn.Softmax(dim=1)\n",
    "    \n",
    "    def drop_BN_layer(self, x, part='dct'):\n",
    "        # Apply dropout / BatchNorm in the order selected by drop_and_BN.\n",
    "        # NOTE(review): an unrecognized `part` leaves `bn` unbound and raises\n",
    "        # UnboundLocalError in the BN branches.\n",
    "        if part == 'dct':\n",
    "            bn = self.bn_dct\n",
    "        elif part == 'vgg':\n",
    "            bn = self.bn_vgg\n",
    "        elif part == 'bert':\n",
    "            bn = self.bn_text\n",
    "\n",
    "        if self.drop_and_BN == 'drop-BN':\n",
    "            x = self.dropout(x)\n",
    "            x = bn(x)\n",
    "        elif self.drop_and_BN == 'BN-drop':\n",
    "            x = bn(x)\n",
    "            x = self.dropout(x)\n",
    "        elif self.drop_and_BN == 'drop-only':\n",
    "            x = self.dropout(x)\n",
    "        elif self.drop_and_BN == 'BN-only':\n",
    "            x = bn(x)\n",
    "        elif self.drop_and_BN == 'none':\n",
    "            pass\n",
    "        \n",
    "        return x\n",
    "\n",
    "    def forward(self, text_input_ids, token_type_ids, attention_mask, image,\n",
    "                dct_img, attn_mask):\n",
    "\n",
    "        #textual feature\n",
    "        bert_output = self.bert(input_ids=text_input_ids,\n",
    "                                token_type_ids=token_type_ids,\n",
    "                                attention_mask=attention_mask)\n",
    "        text_output = bert_output[1]  #the representation of the whole sentence\n",
    "        #print('bert_output:{}, shape:{}'.format(text_output, text_output.shape))\n",
    "        text_output = F.relu(self.linear_text(text_output))\n",
    "        text_output = self.drop_BN_layer(text_output, part='bert')\n",
    "#         print('text_output:{}'text_output.shape)\n",
    "\n",
    "        #visual feature\n",
    "        output = self.vgg(image)\n",
    "        output = F.relu(self.linear_image(output))\n",
    "        output = self.drop_BN_layer(output, part='vgg')\n",
    "\n",
    "        #dct_feature\n",
    "        dct_out = self.dct_img(dct_img)\n",
    "        dct_out = F.relu(self.linear_dct(dct_out))\n",
    "        dct_out = self.drop_BN_layer(dct_out, part='dct')\n",
    "        \n",
    "\n",
    "        # Fuse visual with DCT features, then the result with text, reusing\n",
    "        # the same (shared) fusion layers for both passes.\n",
    "        for fusion_layer in self.fusion_layers:\n",
    "            output = fusion_layer(output, dct_out, attn_mask)\n",
    "\n",
    "        for fusion_layer in self.fusion_layers:\n",
    "            output = fusion_layer(output, text_output, attn_mask)\n",
    "            #print('fusion output_shape:{}'.format(output.shape))\n",
    "\n",
    "        output = F.relu(self.linear1(output))\n",
    "        output = self.dropout(output)\n",
    "        #output = self.bn_1(output)\n",
    "        output = self.linear2(output)\n",
    "        #print('output_size:{}'.format(output.shape))\n",
    "        # Returns raw logits alongside softmax probabilities; the training\n",
    "        # loop presumably applies the loss to the logits -- verify (applying\n",
    "        # CrossEntropyLoss to y_pred_prob would double-normalize).\n",
    "        y_pred_prob = self.softmax(output)\n",
    "\n",
    "        return output, y_pred_prob"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class EarlyStopping:\n",
    "    \"\"\"Stop training once test accuracy has not improved for `patience` epochs.\"\"\"\n",
    "\n",
    "    def __init__(self, patience=7, verbose=False, delta=0, trace_func=print):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            patience (int): epochs to wait after the last test-acc improvement.\n",
    "                            Default: 7\n",
    "            verbose (bool): if True, print a message on improvements and stalls.\n",
    "                            Default: False\n",
    "            delta (float): minimum gain that counts as an improvement.\n",
    "                            Default: 0\n",
    "            trace_func (function): print-like sink for messages.\n",
    "                            Default: print\n",
    "        \"\"\"\n",
    "        self.patience = patience\n",
    "        self.verbose = verbose\n",
    "        self.delta = delta\n",
    "        self.trace_func = trace_func\n",
    "\n",
    "        # Mutable tracking state.\n",
    "        self.counter = 0\n",
    "        self.best_score = None\n",
    "        self.early_stop = False\n",
    "        self.test_acc_max = 0\n",
    "\n",
    "    def __call__(self, test_acc,  test_recall_values):\n",
    "        # test_recall_values is accepted but unused (kept for caller compatibility).\n",
    "        score = test_acc\n",
    "\n",
    "        if self.best_score is not None and score < self.best_score + self.delta:\n",
    "            # No improvement this epoch: advance the patience counter.\n",
    "            self.counter += 1\n",
    "            if self.verbose:\n",
    "                self.trace_func(\n",
    "                    f\"EarlyStopping counter: {self.counter} out of {self.patience}. (Best: {self.test_acc_max:.6f})\"\n",
    "                )\n",
    "            if self.counter >= self.patience:\n",
    "                self.trace_func(\n",
    "                    f\"**EarlyStopping Triggered: test accuracy stuck at {self.test_acc_max:.6f} for {self.patience} epoch(es).\"\n",
    "                )\n",
    "                self.early_stop = True\n",
    "        else:\n",
    "            # First observation or a genuine improvement: record and reset.\n",
    "            self.best_score = score\n",
    "            self.update_max_test_acc(test_acc)\n",
    "            self.counter = 0\n",
    "\n",
    "    def update_max_test_acc(self, test_acc):\n",
    "        \"\"\"Record a new best test accuracy, optionally announcing it.\"\"\"\n",
    "        if self.verbose:\n",
    "            self.trace_func(\n",
    "                f\"Test accuracy increased ({self.test_acc_max:.6f} --> {test_acc:.6f}).\"\n",
    "            )\n",
    "        self.test_acc_max = test_acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class TrainALL(tune.Trainable):\n",
    "    def get_dataloader(self):\n",
    "        \"\"\"Build the train/test DataLoaders for the configured dataset.\n",
    "\n",
    "        Uses instance attributes (seed, dataset_name, VOCAB, max_sen_len,\n",
    "        train_bs, test_bs) -- presumably set in this Trainable's setup();\n",
    "        confirm before reuse.\n",
    "        \"\"\"\n",
    "        # Re-seed torch so DataLoader shuffling is reproducible per trial.\n",
    "        torch.manual_seed(self.seed)\n",
    "        torch.cuda.manual_seed(self.seed)\n",
    "        \n",
    "        if self.dataset_name == 'weibo':\n",
    "            import data_process_weibo as pro\n",
    "        elif self.dataset_name == 'twitter':\n",
    "            import data_process_twitter as pro\n",
    "        else:\n",
    "            # BUGFIX: was `raise 'Dataset Error'`, which itself raises\n",
    "            # TypeError in Python 3 (exceptions must derive from\n",
    "            # BaseException). Fail fast here, before any data loading.\n",
    "            raise ValueError('Dataset Error')\n",
    "\n",
    "        image_list = pro.read_images(pro.image_file_list)\n",
    "\n",
    "        train_data, train_data_num = pro.get_data('train', image_list)\n",
    "        test_data, valid_data_num = pro.get_data('test', image_list)\n",
    "\n",
    "        if self.dataset_name == 'twitter':\n",
    "            transform_vgg = transforms.Compose([\n",
    "                transforms.Resize(256),\n",
    "                transforms.CenterCrop(224),\n",
    "                transforms.ToTensor(),\n",
    "                transforms.Normalize([0.454, 0.440, 0.423], [0.282, 0.278, 0.278])\n",
    "            ])\n",
    "            transform_dct = transforms.Compose(\n",
    "                [transforms.Resize((224, 224)),\n",
    "                 transforms.ToTensor()\n",
    "                ])\n",
    "\n",
    "        else:  # 'weibo' -- guaranteed by the guard above\n",
    "            transform_vgg = transforms.Compose([\n",
    "                transforms.Resize((224,224)),\n",
    "                transforms.ToTensor(),\n",
    "            ])\n",
    "            transform_dct = transforms.Compose([\n",
    "                    transforms.Resize((224,224)),\n",
    "                    transforms.ToTensor()\n",
    "            ])\n",
    "\n",
    "        train_dataset = MyDataset(data=train_data,\n",
    "                                  VOCAB=self.VOCAB,\n",
    "                                  max_sen_len=self.max_sen_len,\n",
    "                                  transform_vgg=transform_vgg,\n",
    "                                  transform_dct=transform_dct)\n",
    "        train_sampler = RandomSampler(train_dataset)\n",
    "        train_loader = DataLoader(dataset=train_dataset,\n",
    "                                  sampler=train_sampler,\n",
    "                                  batch_size=self.train_bs,\n",
    "                                  num_workers=1)\n",
    "\n",
    "        test_dataset = MyDataset(data=test_data,\n",
    "                                 VOCAB=self.VOCAB,\n",
    "                                 max_sen_len=self.max_sen_len,\n",
    "                                 transform_vgg=transform_vgg,\n",
    "                                 transform_dct=transform_dct)\n",
    "        #         test_sampler = SequentialSampler(test_dataset)\n",
    "        # NOTE(review): the test loader is shuffled (RandomSampler); for\n",
    "        # evaluation a SequentialSampler is more conventional -- confirm.\n",
    "        test_sampler = RandomSampler(test_dataset)\n",
    "        test_loader = DataLoader(dataset=test_dataset,\n",
    "                                 sampler=test_sampler,\n",
    "                                 batch_size=self.test_bs,\n",
    "                                 num_workers=1)\n",
    "        return train_loader, test_loader\n",
    "\n",
    "    def get_optimizer(self):\n",
    "        no_decay = [\n",
    "            \"bias\",\n",
    "            \"gamma\",\n",
    "            \"beta\",\n",
    "            \"LayerNorm.weight\",\n",
    "            \"bn_text.weight\",\n",
    "            \"bn_dct.weight\",\n",
    "            \"bn_1.weight\",\n",
    "        ]\n",
    "        \n",
    "        bert_param_optimizer = list(self.model.bert.named_parameters())\n",
    "        vgg_param_optimizer = list(self.model.vgg.named_parameters())\n",
    "        dtcconv_param_optimizer = list(self.model.dct_img.named_parameters())\n",
    "        fusion_param_optimizer = list(\n",
    "            self.model.fusion_layers.named_parameters()\n",
    "        )\n",
    "        linear_param_optimizer = (\n",
    "            list(self.model.linear_text.named_parameters())\n",
    "            + list(self.model.linear_image.named_parameters())\n",
    "            + list(self.model.linear_dct.named_parameters())\n",
    "        )\n",
    "        classifier_param_optimizer = list(self.model.linear1.named_parameters()) + list(\n",
    "            self.model.linear2.named_parameters()\n",
    "        )\n",
    "        optimizer_grouped_parameters = [\n",
    "            # bert_param_optimizer\n",
    "            {\"params\": [p for n, p in bert_param_optimizer if not any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": self.weight_decay,\n",
    "            \"lr\": self.bert_learning_rate,},\n",
    "            {\"params\": [p for n, p in bert_param_optimizer if any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": 0.0,\n",
    "            \"lr\": self.bert_learning_rate,},\n",
    "            # vgg_param_optimizer\n",
    "            {\"params\": [p for n, p in vgg_param_optimizer if not any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": self.weight_decay,\n",
    "            \"lr\": self.vgg_learning_rate,},\n",
    "            {\"params\": [p for n, p in vgg_param_optimizer if any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": 0.0,\n",
    "            \"lr\": self.vgg_learning_rate,},\n",
    "            # dtcconv_param_optimizer\n",
    "            {\"params\": [p for n, p in dtcconv_param_optimizer if not any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": self.weight_decay,\n",
    "            \"lr\": self.dtcconv_learning_rate,},\n",
    "            {\"params\": [p for n, p in dtcconv_param_optimizer if any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": 0.0,\n",
    "            \"lr\": self.dtcconv_learning_rate,},\n",
    "            # fusion_param_optimizer\n",
    "            {\"params\": [p for n, p in fusion_param_optimizer if not any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": self.weight_decay,\n",
    "            \"lr\": self.fusion_learning_rate,},\n",
    "            {\"params\": [p for n, p in fusion_param_optimizer if any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": 0.0,\n",
    "            \"lr\": self.fusion_learning_rate,},\n",
    "            # linear_param_optimizer\n",
    "            {\"params\": [p for n, p in linear_param_optimizer if not any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": self.weight_decay,\n",
    "            \"lr\": self.linear_learning_rate,},\n",
    "            {\"params\": [p for n, p in linear_param_optimizer if any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": 0.0,\n",
    "            \"lr\": self.linear_learning_rate,},\n",
    "            # classifier_param_optimizer\n",
    "            {\"params\": [p for n, p in classifier_param_optimizer if not any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": self.weight_decay,\n",
    "            \"lr\": self.classifier_learning_rate,},\n",
    "            {\"params\": [p for n, p in classifier_param_optimizer if any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": 0.0,\n",
    "            \"lr\": self.classifier_learning_rate,},\n",
    "        ]\n",
    "        \n",
    "        if self.optimizer_name == \"SGD\":\n",
    "            optimizer = torch.optim.SGD(\n",
    "                optimizer_grouped_parameters,\n",
    "#                 filter(lambda p: filter(lambda x: x['params'].requires_grad, p), optimizer_grouped_parameters),\n",
    "                lr=self.learning_rate,\n",
    "                momentum=self.momentum,\n",
    "                weight_decay=self.weight_decay,\n",
    "            )\n",
    "        elif self.optimizer_name == \"Adam\":\n",
    "            optimizer = torch.optim.Adam(\n",
    "                optimizer_grouped_parameters,\n",
    "#                 filter(lambda p: filter(lambda x: x['params'].requires_grad, p), optimizer_grouped_parameters),\n",
    "                lr=self.learning_rate,\n",
    "                weight_decay=self.weight_decay,\n",
    "            )\n",
    "        elif self.optimizer_name == \"AdamW\":\n",
    "            optimizer = torch.optim.AdamW(\n",
    "                optimizer_grouped_parameters,\n",
    "#                 filter(lambda p: filter(lambda x: x['params'].requires_grad, p), optimizer_grouped_parameters),\n",
    "                lr=self.learning_rate,\n",
    "                weight_decay=self.weight_decay,\n",
    "            )\n",
    "        elif self.optimizer_name == \"AdaBelief\":\n",
    "            from adabelief_pytorch import AdaBelief\n",
    "            optimizer = AdaBelief(\n",
    "                optimizer_grouped_parameters,\n",
    "#                 filter(lambda p: filter(lambda x: x['params'].requires_grad, p), optimizer_grouped_parameters),\n",
    "                lr=self.learning_rate,\n",
    "                eps=1e-10, # or 1e-16\n",
    "                betas=(0.9,0.999),\n",
    "                weight_decouple = True,\n",
    "                rectify = False)\n",
    "        else:\n",
    "            raise 'optimizer WRONG'\n",
    "        return optimizer\n",
    "    \n",
    "    def get_scheduler(self):\n",
    "        # Total number of training steps is number of batches * number of epochs.\n",
    "        total_steps = len(self.train_loader) * self.epochs\n",
    "\n",
    "        # Create the learning rate scheduler.\n",
    "        scheduler = get_linear_schedule_with_warmup(\n",
    "            self.optimizer,\n",
    "            num_warmup_steps=round(total_steps*self.warm_up_percentage),\n",
    "            num_training_steps=total_steps\n",
    "        )\n",
    "        return scheduler\n",
    "\n",
    "    def init_network(self, exclude_list=['bert', 'vgg']):\n",
    "        if self.init_method != 'default':\n",
    "            for name, w in self.model.named_parameters():\n",
    "                cross = [val for val in exclude_list if val in name.split('.')]\n",
    "                if cross == []:  # 对于embedding，保留预训练的embedding\n",
    "                    if [val for val in ['bn_text', 'bn_vgg', 'bn_dct', 'bn_1', 'layer_norm'] if val in name.split('.')] == []:\n",
    "                        if 'weight' in name:\n",
    "\n",
    "                            if self.init_method == 'xavier-normal':\n",
    "                                nn.init.xavier_normal_(w)\n",
    "                            elif self.init_method == 'xavier-uniform':\n",
    "                                nn.init.xavier_uniform_(w)\n",
    "                            elif self.init_method == 'kaiming-normal':\n",
    "                                nn.init.kaiming_normal_(w)\n",
    "                            elif self.init_method == 'kaiming-uniform':\n",
    "                                nn.init.kaiming_uniform_(w)\n",
    "                            else:\n",
    "                                pass\n",
    "                        elif 'bias' in name:\n",
    "                            nn.init.constant_(w, 0)\n",
    "                        else:\n",
    "                            pass\n",
    "\n",
    "    def get_model(self):\n",
    "        model = NetShareFusion(CASED=self.CASED,\n",
    "                pthfile=self.pthfile,\n",
    "                kernel_sizes=self.kernel_sizes,\n",
    "                num_channels=self.num_channels,\n",
    "                num_layers=self.num_layers,\n",
    "                num_heads=self.num_heads,\n",
    "                model_dim=self.model_dim,\n",
    "                dropout=self.dropout,\n",
    "                drop_and_BN=self.drop_and_BN)\n",
    "\n",
    "        if self.FREEZE_BERT:\n",
    "            for name, param in model.named_parameters():\n",
    "                if \"bert\" in name:\n",
    "                    param.requires_grad = False\n",
    "        \n",
    "        if self.FREEZE_VGG:\n",
    "            for name, param in model.named_parameters():\n",
    "                if \"vgg\" in name:\n",
    "                    param.requires_grad = False\n",
    "\n",
    "        return model\n",
    "    \n",
    "    def flat_accuracy(self, preds, labels):\n",
    "        pred_flat = np.argmax(preds, axis=1)\n",
    "        labels_flat = labels\n",
    "        return np.sum(pred_flat == labels_flat) / len(labels)\n",
    "    \n",
    "    def config_check(self):\n",
    "        if self.dataset_name == 'weibo' and 'multilingual' in self.CASED:\n",
    "            raise('Using weibo dataset with multilingual model!')\n",
    "        if self.dataset_name == 'twitter' and 'chinese' in self.CASED:\n",
    "            raise('Using twitter dataset with chinese model!')\n",
    "\n",
    "    def setup(self, config):\n",
    "        self.config = config\n",
    "        if environment:\n",
    "            self.CASED =  '/home/**/workspace/model/bert-base-chinese/' #multilingual-cased\n",
    "            self.VOCAB = '/home/**/workspace/model/bert-base-chinese/vocab.txt'\n",
    "            self.pthfile = '/home/**/workspace/model/vgg19-dcbb9e9d.pth'\n",
    "            self.save_root = '/home/**/workspace/output/'\n",
    "        \n",
    "        self.init_method = config.get(\"init_method\")\n",
    "        self.max_grad_norm = 1.0\n",
    "        self.warm_up_percentage = 0.1\n",
    "        self.early_stopping_patience = config.get(\"early_stopping_patience\")\n",
    "        self.early_stopping = EarlyStopping(patience=self.early_stopping_patience, verbose=True)\n",
    "        \n",
    "        self.bert_learning_rate = config.get(\"bert_learning_rate\")\n",
    "        self.vgg_learning_rate = config.get(\"vgg_learning_rate\")\n",
    "        self.dtcconv_learning_rate = config.get(\"dtcconv_learning_rate\")\n",
    "        self.fusion_learning_rate = config.get(\"fusion_learning_rate\")\n",
    "        self.linear_learning_rate = config.get(\"linear_learning_rate\")\n",
    "        self.classifier_learning_rate = config.get(\"classifier_learning_rate\")\n",
    "        \n",
    "        self.FREEZE_BERT = config.get(\"FREEZE_BERT\")\n",
    "        self.FREEZE_VGG = config.get(\"FREEZE_VGG\")                \n",
    "\n",
    "        self.seed = config.get(\"seed\")\n",
    "        self.kernel_sizes = config.get(\"kernel_sizes\")  # [3, 3, 3]\n",
    "        self.num_channels = config.get(\"num_channels\")  # [32, 64, 128]\n",
    "        self.drop_and_BN = config.get(\n",
    "            \"drop_and_BN\"\n",
    "        )  # 'BN-drop', 'drop-BN', 'BN-only', 'drop-only', 'none'\n",
    "        self.num_layers = config.get(\"num_layers\")  # int, e.g, 1\n",
    "        self.num_heads = config.get(\"num_heads\")  # int, e.g, 8\n",
    "        self.model_dim = config.get(\"model_dim\")\n",
    "        self.dropout = config.get(\"dropout\")  # number, e.g. 0.5\n",
    "\n",
    "        self.train_bs = config.get(\"train_bs\")\n",
    "        self.test_bs = config.get(\"test_bs\")\n",
    "        self.momentum = config.get(\"momentum\")\n",
    "        self.epochs = config.get(\"epochs\")\n",
    "        \n",
    "        self.ablation = config.get(\"ablation\")\n",
    "        self.dataset_name = config.get(\"dataset_name\")  # weibo, twitter\n",
    "        self.optimizer_name = config.get(\"optimizer_name\")  # SGD, Adam, AdamW\n",
    "\n",
    "        self.learning_rate = config.get(\"learning_rate\")  # number\n",
    "        self.weight_decay = config.get(\"weight_decay\")  # number\n",
    "        self.max_sen_len = config.get(\"max_sen_len\")  # int\n",
    "        self.device = torch.device(\n",
    "            'cuda' if torch.cuda.is_available() else 'cpu')\n",
    "        \n",
    "        self.config_check()\n",
    "        \n",
    "        self.model = self.get_model().to(self.device)\n",
    "        self.init_network()\n",
    "        \n",
    "        self.train_loader, self.test_loader = self.get_dataloader()\n",
    "        \n",
    "        self.criterion = nn.CrossEntropyLoss()\n",
    "        self.optimizer = self.get_optimizer()\n",
    "        self.scheduler = self.get_scheduler()\n",
    "        \n",
    "    def handle_batch_input(self, train_data):\n",
    "        bert_paras = [\"text_input_ids\", \"token_type_ids\", \"attention_mask\"]\n",
    "        vgg_paras = [\"image\"]\n",
    "        dct_paras = [\"dct_img\"]\n",
    "        share_paras = ['label', 'post_id']\n",
    "        parameters = {}\n",
    "        involve = bert_paras + vgg_paras + dct_paras\n",
    "        involve += share_paras\n",
    "        \n",
    "        for para in involve:\n",
    "            parameters[para] = train_data[para].to(self.device)\n",
    "            \n",
    "        return parameters\n",
    "    \n",
    "    def handle_model_input(self, parameters):\n",
    "        outputs = self.model(parameters['text_input_ids'],\n",
    "                             parameters['token_type_ids'],\n",
    "                             parameters['attention_mask'],\n",
    "                             parameters['image'],\n",
    "                             parameters['dct_img'],\n",
    "                             attn_mask=None)\n",
    "            \n",
    "        return outputs\n",
    "        \n",
    "    def train_one_time(self):\n",
    "        # training\n",
    "        loss_values, test_loss_values = [], []\n",
    "        acc_values, test_acc_values = [], []\n",
    "        test_precision_values = []\n",
    "        test_recall_values = []\n",
    "        test_f1_values = []\n",
    "\n",
    "        for epoch_index, epoch in enumerate(range(self.epochs)):\n",
    "            print('epoch:{}{}'.format(epoch_index, '-' * 20))\n",
    "\n",
    "            self.model.train()\n",
    "\n",
    "            train_batch_loss = []\n",
    "            train_batch_acc = []\n",
    "            for i, train_data in enumerate(self.train_loader):\n",
    "                parameters = self.handle_batch_input(train_data)\n",
    "                train_label = parameters['label']\n",
    "\n",
    "                # Forward + Backward + Optimize\n",
    "                self.model.zero_grad()\n",
    "                outputs = self.handle_model_input(parameters)\n",
    "\n",
    "                loss_input = outputs[0]\n",
    "                loss = self.criterion(loss_input, train_label)\n",
    "                loss.backward(retain_graph=True)\n",
    "\n",
    "                # Gradient cropping\n",
    "                torch.nn.utils.clip_grad_norm_(\n",
    "                    parameters=self.model.parameters(),\n",
    "                    max_norm=self.max_grad_norm)\n",
    "\n",
    "                train_label = train_label.cpu().detach().numpy().tolist()\n",
    "                pred_input = torch.sigmoid(\n",
    "                    outputs[1]).cpu().detach().numpy().tolist()  #output[1]\n",
    "\n",
    "                acc = self.flat_accuracy(pred_input, train_label)\n",
    "                self.optimizer.step()\n",
    "                self.scheduler.step()\n",
    "\n",
    "                train_batch_loss.append(loss.detach().item())\n",
    "                train_batch_acc.append(acc)\n",
    "\n",
    "            # Store the loss value for plotting the learning curve.\n",
    "            train_epoch_loss = sum(train_batch_loss) / len(self.train_loader)\n",
    "            loss_values.append(train_epoch_loss)\n",
    "\n",
    "            # Store the acc value\n",
    "            train_epoch_acc = sum(train_batch_acc) / len(self.train_loader)\n",
    "            acc_values.append(train_epoch_acc)\n",
    "\n",
    "            self.model.eval()\n",
    "\n",
    "            test_batch_loss = []\n",
    "            test_batch_acc = []\n",
    "            report_label = []\n",
    "            report_predict = []\n",
    "            \n",
    "            best_test_acc = 0\n",
    "            \n",
    "\n",
    "            for i, test_data in enumerate(self.test_loader):\n",
    "                parameters = self.handle_batch_input(test_data)\n",
    "                test_label = parameters['label']\n",
    "\n",
    "                with torch.no_grad():\n",
    "                    outputs = self.handle_model_input(parameters)\n",
    "\n",
    "                test_loss_input = outputs[0]\n",
    "                test_loss = self.criterion(test_loss_input, test_label)\n",
    "                \n",
    "                predict = torch.max(outputs[1].cpu().detach(), 1)[1]\n",
    "\n",
    "                test_pred_input = torch.sigmoid(\n",
    "                    outputs[1]).cpu().detach().numpy().tolist()  #output[1]\n",
    "                test_label = test_label.cpu().detach().numpy().tolist()\n",
    "\n",
    "                test_acc = self.flat_accuracy(test_pred_input, test_label)\n",
    "\n",
    "                test_batch_loss.append(test_loss.detach().item())\n",
    "                test_batch_acc.append(test_acc)\n",
    "                \n",
    "                for j in range(len(test_label)):\n",
    "                    report_label.append(test_label[j])\n",
    "                    report_predict.append(predict[j])\n",
    "\n",
    "            test_epoch_loss = sum(test_batch_loss) / len(self.test_loader)\n",
    "            test_epoch_acc = sum(test_batch_acc) / len(self.test_loader)\n",
    "            \n",
    "            report = classification_report(report_label, report_predict, output_dict = True)\n",
    "            \n",
    "            if test_epoch_acc > best_test_acc:\n",
    "                best_test_acc = test_epoch_acc\n",
    "                self.condition_save(epoch_index, test_epoch_acc, report)\n",
    "            \n",
    "            test_loss_values.append(test_epoch_loss)\n",
    "            test_acc_values.append(test_epoch_acc)\n",
    "            test_precision_values.append(float(report[\"macro avg\"][\"precision\"]))\n",
    "            test_recall_values.append(float(report[\"macro avg\"][\"recall\"]))\n",
    "            test_f1_values.append(float(report[\"macro avg\"][\"f1-score\"]))\n",
    "            \n",
    "            self.print_result_table_handler(loss_values, acc_values, test_loss_values, test_acc_values, test_precision_values,test_recall_values,test_f1_values, report, print_type='tabel', table_type='pretty')\n",
    "\n",
    "            #self.condition_save(epoch_index, test_epoch_acc, report)\n",
    "            \n",
    "            # early_stopping HERE～\n",
    "            self.early_stopping(test_epoch_acc, test_recall_values)\n",
    "\n",
    "            if self.early_stopping.early_stop:\n",
    "                break\n",
    "\n",
    "        return np.max(test_acc_values)\n",
    "    \n",
    "    def print_result_table_handler(self, loss_values, acc_values, \n",
    "                                   test_loss_values, test_acc_values, \n",
    "                                   test_precision_values,test_recall_values,\n",
    "                                   test_f1_values, report, print_type='tabel',\n",
    "                                   table_type='pretty'):\n",
    "        \n",
    "        def trend(values_list):\n",
    "            if len(values_list) == 1:\n",
    "                diff_value = values_list[-1]\n",
    "                return '↑ ({:+.6f})'.format(diff_value)\n",
    "            else:\n",
    "                diff_value = values_list[-1] - values_list[-2]\n",
    "                if values_list[-1] > values_list[-2]:\n",
    "                    return '↑ ({:+.6f})'.format(diff_value)\n",
    "                elif values_list[-1] == values_list[-2]:\n",
    "                    return '~'\n",
    "                else:\n",
    "                    return '↓ ({:+.6f})'.format(diff_value)\n",
    "        \n",
    "        if print_type == 'tabel':\n",
    "            avg_table = [[\"train loss\",loss_values[-1],trend(loss_values)],\n",
    "                     [\"train acc\",acc_values[-1],trend(acc_values)],\n",
    "                     [\"test loss\",test_loss_values[-1],trend(test_loss_values)],\n",
    "                     [\"test acc\",test_acc_values[-1],trend(test_acc_values)],\n",
    "                     [\"test pre\", test_precision_values[-1],trend(test_precision_values)],\n",
    "                     ['test rec',test_recall_values[-1],trend(test_recall_values)],\n",
    "                     ['test F1',test_f1_values[-1],trend(test_f1_values)]]\n",
    "\n",
    "\n",
    "            avg_header = ['metric','value','trend']\n",
    "            print((tabulate(avg_table, avg_header, floatfmt=\".6f\", tablefmt=table_type)))\n",
    "\n",
    "            class_table = [['0', report[\"0\"][\"precision\"], report[\"0\"][\"recall\"], report[\"0\"][\"f1-score\"], '{}/{}'.format(report[\"0\"][\"support\"], report['macro avg'][\"support\"])],\n",
    "                          ['1', report[\"1\"][\"precision\"], report[\"1\"][\"recall\"], report[\"1\"][\"f1-score\"], '{}/{}'.format(report[\"1\"][\"support\"], report['macro avg'][\"support\"])]]\n",
    "\n",
    "            class_header = ['class', 'precision', 'recall', 'f1', 'support']\n",
    "            print((tabulate(class_table, class_header, floatfmt=\".6f\", tablefmt=table_type)))\n",
    "        else:\n",
    "            print((\"Average train loss: {}\".format(loss_values[-1])))\n",
    "            print((\"Average train acc: {}\".format(acc_values[-1])))\n",
    "            print((\"Average test loss: {}\".format(test_loss_values[-1])))\n",
    "            print((\"Average test acc: {}\".format(test_acc_values[-1])))\n",
    "            print(report)\n",
    "\n",
    "    def step(self):\n",
    "        test_acc = self.train_one_time()\n",
    "        return {\"best_test_accuracy\": test_acc}\n",
    "    \n",
    "    def save_model(self, folder_path, epoch_index, test_acc, report):\n",
    "        root = self.save_root\n",
    "        now = datetime.now()\n",
    "        dt_string = now.strftime(\"%Y_%m_%d_%H_%M_%S\")\n",
    "        \n",
    "        path = os.path.join(root, folder_path)\n",
    "\n",
    "        if not os.path.exists(path):\n",
    "            os.makedirs(path)\n",
    "            \n",
    "        save_name = \"task_{}-epoch_{}-model_{}-date-{}-acc_{}-precision_{}-recall_{}-f1_{}.pth\".format(\n",
    "                    self.dataset_name, epoch_index, self.ablation, dt_string, test_acc, report[\"macro avg\"][\"precision\"], report[\"macro avg\"][\"recall\"], report[\"macro avg\"][\"f1-score\"])\n",
    "        print(\"Saving model to {}, as {}\".format(path, save_name))\n",
    "        \n",
    "        state = {\n",
    "            \"net\": self.model.state_dict(),\n",
    "            \"optimizer\": self.optimizer.state_dict(),\n",
    "            \"scheduler\": self.scheduler.state_dict(),\n",
    "            \"config\": self.config,\n",
    "        }\n",
    "        torch.save(\n",
    "            state,\n",
    "            os.path.join(\n",
    "                path,\n",
    "                save_name,\n",
    "            ),\n",
    "        )\n",
    "        \n",
    "    def condition_save(self, epoch_index, test_epoch_acc, report):\n",
    "        twitter_threshold = 0.8\n",
    "        weibo_threshold = 0.89\n",
    "        if self.dataset_name == 'twitter':\n",
    "            if test_epoch_acc >= twitter_threshold:\n",
    "                folder_path = 'model_save'\n",
    "                self.save_model(folder_path, epoch_index, test_epoch_acc, report)\n",
    "        elif self.dataset_name == 'weibo':\n",
    "            if test_epoch_acc >= weibo_threshold:\n",
    "                folder_path = 'model_save'\n",
    "                self.save_model(folder_path, epoch_index, test_epoch_acc, report)\n",
    "        \n",
    "    def save_checkpoint(self, checkpoint_dir):\n",
    "        checkpoint_path = os.path.join(checkpoint_dir, \"model.pth\")\n",
    "        torch.save(self.model.state_dict(), checkpoint_path)\n",
    "        return checkpoint_path\n",
    "\n",
    "    def load_checkpoint(self, checkpoint_path):\n",
    "        self.model.load_state_dict(torch.load(checkpoint_path))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class AvgMetricCallback(Callback):\n",
    "    \n",
    "    def __init__(self):\n",
    "        super(AvgMetricCallback, self).__init__()\n",
    "            \n",
    "    def init(self):\n",
    "        try:\n",
    "            self.results_df\n",
    "            self.record_index += 1\n",
    "        except:\n",
    "            self.results_df = pd.DataFrame()\n",
    "            self.record_index = 1\n",
    "            \n",
    "    def handle_parameters(self, config):\n",
    "        for key,value in config.items():\n",
    "            if isinstance(value, list):\n",
    "                config[key] = str(value)\n",
    "                \n",
    "        df = pd.DataFrame(config, index=[self.record_index])\n",
    "        return df\n",
    "\n",
    "    def on_trial_complete(self, iteration, trials, trial, **info):\n",
    "        self.init()\n",
    "        \n",
    "        config_df = self.handle_parameters(trial.config)\n",
    "        config_df['trial'] = trial\n",
    "        self.results_df = pd.concat([self.results_df, config_df], sort=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# grid search - weibo\n",
    "local_dir_root = '/home/**/workspace/log/'\n",
    "repeat_times = 10\n",
    "max_concurrent = 1\n",
    "avg_metric = AvgMetricCallback()\n",
    "\n",
    "analysis = tune.run(\n",
    "    TrainALL,\n",
    "    callbacks=[avg_metric],\n",
    "    metric=\"best_test_accuracy\",\n",
    "    mode=\"max\",\n",
    "    name=\"weibo-experiment\",\n",
    "    local_dir=os.path.join(local_dir_root, '{}'.format(datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\"))),\n",
    "    resources_per_trial={\"cpu\": 1, \"gpu\": 1 /max_concurrent},\n",
    "    stop={\"training_iteration\": 1},\n",
    "    num_samples=repeat_times,\n",
    "    config={\n",
    "        # data\n",
    "        \"dataset_name\": tune.grid_search([\"weibo\"]),\n",
    "        \"max_sen_len\": tune.grid_search([160]),\n",
    "        \n",
    "        # Network\n",
    "        \"ablation\": tune.grid_search(['bert', 'vgg', 'dct', 'bert+vgg+fusion','bert+dct+vgg+concat','bert+vgg+concat',\"bert+dct+vgg+fusion\"]),\n",
    "                \n",
    "        \"bert_model_name\": tune.grid_search([\"bert-base-chinese\"]),\n",
    "        \"kernel_sizes\": tune.grid_search([[3, 3, 3]]),\n",
    "        \"num_channels\": tune.grid_search([[32, 64, 128]]),\n",
    "        \"num_layers\": tune.grid_search([2]), \n",
    "        \"num_heads\": tune.grid_search([4]),\n",
    "        \"dropout\": tune.grid_search([0.5]),\n",
    "        \"drop_and_BN\": tune.grid_search(['drop-BN']), # 'drop-BN', 'BN-drop', 'BN-only', 'drop-only', 'none'\n",
    "        \"FREEZE_BERT\": tune.grid_search([False]),\n",
    "        \"FREEZE_VGG\": tune.grid_search([False]),\n",
    "        \"model_dim\": tune.grid_search([256]),\n",
    "        \"init_method\": tune.grid_search(['default']),\n",
    "\n",
    "        # optimizer\n",
    "        \"optimizer_name\": tune.grid_search([\"AdaBelief\"]), # AdaBelief, Adam are better\n",
    "        \"learning_rate\": tune.grid_search([0.0001]), \n",
    "        \"bert_learning_rate\": tune.loguniform(1e-5, 1e-2),\n",
    "        \"vgg_learning_rate\": tune.loguniform(1e-5, 1e-2),\n",
    "        \"dtcconv_learning_rate\": tune.loguniform(1e-5, 1e-2),\n",
    "        \"fusion_learning_rate\": tune.loguniform(1e-5, 1e-2),\n",
    "        \"linear_learning_rate\": tune.loguniform(1e-5, 1e-2),\n",
    "        \"classifier_learning_rate\": tune.loguniform(1e-5, 1e-2),\n",
    "        \n",
    "        \"momentum\": tune.grid_search([0.9]),\n",
    "        \"weight_decay\": tune.grid_search([0.15]),\n",
    "        \"seed\": tune.grid_search([43]),\n",
    "        \n",
    "        \"early_stopping_patience\": 10,\n",
    "        \n",
    "        # training\n",
    "        \"epochs\": tune.grid_search([100]),\n",
    "        \"train_bs\": tune.grid_search([16]),\n",
    "        \"test_bs\": tune.grid_search([16]),\n",
    "\n",
    "    },\n",
    ")\n",
    "\n",
    "print(\"Best config is:\", analysis.get_best_config(metric=\"best_test_accuracy\", mode=\"max\"))\n"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
