{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "# libraries and GPU selection\n",
    "# Select the GPU BEFORE importing any CUDA-aware framework: TensorFlow/Keras\n",
    "# reads CUDA_VISIBLE_DEVICES when it first initialises CUDA, so setting these\n",
    "# after `from keras import ...` (as before) risks the restriction being ignored.\n",
    "import os\n",
    "os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"  # restrict this run to GPU 2\n",
    "\n",
    "import threading\n",
    "\n",
    "import torch\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "from keras import optimizers\n",
    "from keras import initializers\n",
    "from keras import regularizers\n",
    "from keras.utils import np_utils\n",
    "from keras.utils import to_categorical\n",
    "from keras.models import Sequential, Model\n",
    "from keras.callbacks import ModelCheckpoint\n",
    "from keras.layers.merge import concatenate\n",
    "from keras.layers import Dense, Flatten, Dropout, Input, BatchNormalization, PReLU"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Text embeddings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load precomputed per-article text embeddings for the PolitiFact splits.\n",
    "# NOTE(review): pickle.load executes arbitrary code; only load trusted files.\n",
    "import pickle\n",
    "with open('politifact/finalTrainEmbeddings.pkl', 'rb') as f:\n",
    "    trainEmbeddings = pickle.load(f)\n",
    "with open('politifact/finalTestEmbeddings.pkl', 'rb') as f:\n",
    "    testEmbeddings = pickle.load(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# dictionary---> Article text: ( label, articleURL, ImageId )\n",
    "# Load train/test article metadata, keyed by the article text itself.\n",
    "import json\n",
    "with open('imageDataset/politi/trainJson.json', 'r') as f:\n",
    "    trainData = json.load(f)\n",
    "with open('imageDataset/politi/testJson.json', 'r') as f:\n",
    "    testData = json.load(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Collapse each sentence's token embeddings into one 768-d vector by averaging\n",
    "# over dimension 1 (assumes each entry j[0] is (1, seq_len, 768) -- TODO confirm\n",
    "# against the embedding generator). `dim` is torch's canonical keyword; `axis`\n",
    "# is only a NumPy-compatibility alias.\n",
    "for i in trainEmbeddings:\n",
    "    trainEmbeddings[i] = [torch.mean(j[0], dim=1) for j in trainEmbeddings[i]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same sentence-level mean pooling as the train split above; `dim` is torch's\n",
    "# canonical keyword (`axis` is only a NumPy-compatibility alias).\n",
    "for i in testEmbeddings:\n",
    "    testEmbeddings[i] = [torch.mean(j[0], dim=1) for j in testEmbeddings[i]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Grab one article's embedding list for manual inspection (shape/debug only).\n",
    "temp = next(iter(testEmbeddings.values()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# padding\n",
    "# Fix every article at exactly MAX_SENTS sentence vectors: crop longer articles,\n",
    "# pad shorter ones with zero vectors, then pack the list into one\n",
    "# (MAX_SENTS, 768) GPU tensor. torch.zeros replaces the previous torch.empty\n",
    "# buffer so no uninitialised memory can ever leak through.\n",
    "MAX_SENTS = 50\n",
    "EMB_DIM = 768\n",
    "for i in trainEmbeddings:\n",
    "    sents = trainEmbeddings[i][:MAX_SENTS]\n",
    "    while len(sents) < MAX_SENTS:\n",
    "        sents.append(torch.zeros((1, EMB_DIM), dtype=torch.float32, device='cuda:0'))\n",
    "    packed = torch.zeros(MAX_SENTS, EMB_DIM, dtype=torch.float32, device='cuda:0')\n",
    "    for j in range(MAX_SENTS):\n",
    "        packed[j][:] = sents[j]\n",
    "    trainEmbeddings[i] = packed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same crop/zero-pad to a fixed (50, 768) tensor for the test split.\n",
    "MAX_SENTS = 50\n",
    "EMB_DIM = 768\n",
    "for i in testEmbeddings:\n",
    "    sents = testEmbeddings[i][:MAX_SENTS]\n",
    "    while len(sents) < MAX_SENTS:\n",
    "        sents.append(torch.zeros((1, EMB_DIM), dtype=torch.float32, device='cuda:0'))\n",
    "    packed = torch.zeros(MAX_SENTS, EMB_DIM, dtype=torch.float32, device='cuda:0')\n",
    "    for j in range(MAX_SENTS):\n",
    "        packed[j][:] = sents[j]\n",
    "    testEmbeddings[i] = packed"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Image embeddings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load precomputed VGG image features, keyed by image filename\n",
    "# (each value is a (1, 4096) fc-layer vector -- see the shape check below).\n",
    "with open('politifact/train_vgg_poli.pickle', 'rb') as f:\n",
    "    train_vgg_poli = pickle.load(f)\n",
    "with open('politifact/test_vgg_poli.pickle', 'rb') as f:\n",
    "    test_vgg_poli = pickle.load(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Aligned per-sample containers, filled in lockstep by the matching step below.\n",
    "train_text, test_text = [], []    # text embeddings\n",
    "train_image, test_image = [], []  # image embeddings\n",
    "train_label, test_label = [], []  # labels (1 = real, 0 = fake)\n",
    "\n",
    "trainTextNames, testTextNames = [], []    # matched article texts (dict keys)\n",
    "trainImageNames, testImageNames = [], []  # matched image file names, e.g. name.jpg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# match code\n",
    "# For every VGG image, find the article(s) whose Top_img id matches and whose\n",
    "# text embedding exists, then append the aligned (text, image, label) triple.\n",
    "# A one-time index over Top_img replaces the original O(images x articles) scan;\n",
    "# dict insertion order keeps the append order identical to the nested loops.\n",
    "articlesByImg = {}\n",
    "for j in trainData:\n",
    "    articlesByImg.setdefault(trainData[j][-1]['Top_img'], []).append(j)\n",
    "\n",
    "for i in train_vgg_poli:\n",
    "    for j in articlesByImg.get(i.split('.jpg')[0], []):\n",
    "        if j in trainEmbeddings:\n",
    "            trainImageNames.append(i)\n",
    "            trainTextNames.append(j)\n",
    "            train_text.append(trainEmbeddings[j])\n",
    "            train_image.append(train_vgg_poli[trainData[j][-1]['Top_img'] + '.jpg'])\n",
    "            train_label.append(trainData[j][0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same image/article alignment for the test split, via a Top_img index\n",
    "# instead of the original O(images x articles) nested scan.\n",
    "testArticlesByImg = {}\n",
    "for j in testData:\n",
    "    testArticlesByImg.setdefault(testData[j][-1]['Top_img'], []).append(j)\n",
    "\n",
    "for i in test_vgg_poli:\n",
    "    for j in testArticlesByImg.get(i.split('.jpg')[0], []):\n",
    "        if j in testEmbeddings:\n",
    "            testImageNames.append(i)\n",
    "            testTextNames.append(j)\n",
    "            test_text.append(testEmbeddings[j])\n",
    "            test_image.append(test_vgg_poli[testData[j][-1]['Top_img'] + '.jpg'])\n",
    "            test_label.append(testData[j][0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(381, 104)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(trainTextNames), len(testTextNames)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(381, 104)"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(trainImageNames), len(testImageNames)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "321 164\n"
     ]
    }
   ],
   "source": [
    "# Class balance over the full dataset (train + test combined).\n",
    "all_labels = train_label + test_label\n",
    "realCount = all_labels.count(1)\n",
    "fakeCount = all_labels.count(0)\n",
    "print(realCount, fakeCount)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a test-split manifest (article, image, label); the commented block\n",
    "# below is the train-split variant of the same manifest.\n",
    "import pandas as pd\n",
    "df=pd.DataFrame()\n",
    "df['article']=testTextNames\n",
    "df['image']=testImageNames\n",
    "df['label']=test_label\n",
    "\n",
    "\n",
    "# df['article']=trainTextNames\n",
    "# df['image']=trainImageNames\n",
    "# df['label']=train_label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df.to_csv('/media/data_dump_2/Shivangi/baseline_models/politifact_test.csv', sep='\\t')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# saving updated final dataset in the csv files for future reference\n",
    "# Only the train labels are written on this run; uncomment the other blocks\n",
    "# to regenerate the remaining reference CSVs.\n",
    "import pandas as pd\n",
    "df=pd.DataFrame()\n",
    "# df['train_articles']=trainTextNames\n",
    "# df.to_csv('politifact_train_articles.csv', index=False)\n",
    "\n",
    "# df['test_articles']=testTextNames\n",
    "# df.to_csv('politifact_test_articles.csv', index=False)\n",
    "\n",
    "df['train_label']=train_label\n",
    "df.to_csv('politifact_train_label.csv', index=False)\n",
    "\n",
    "# df['test_label']=test_label\n",
    "# df.to_csv('politifact_test_label.csv', index=False)\n",
    "\n",
    "# df['train_image']=trainImageNames\n",
    "# df.to_csv('politifact_train_image.csv', index=False)\n",
    "\n",
    "# df['test_image']=testImageNames\n",
    "# df.to_csv('politifact_test_image.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One-hot encode the 0/1 labels into 2-class vectors for the softmax output.\n",
    "train_label = to_categorical(train_label)\n",
    "test_label = to_categorical(test_label)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Move the GPU tensors to host memory and convert to NumPy for Keras;\n",
    "# t.cpu().numpy() is the idiomatic form of torch.Tensor.numpy(t.cpu()).\n",
    "train_text = [t.cpu().numpy() for t in train_text]\n",
    "test_text = [t.cpu().numpy() for t in test_text]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_text_matrix = np.ndarray(shape=(len(train_text), 50,768))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(381, 50, 768)"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_text_matrix.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Copy each article's (50, 768) embedding block into its row of the matrix;\n",
    "# enumerate replaces the manual counter and the confusing [c][:][:] view chain.\n",
    "for counter, emb in enumerate(train_text):\n",
    "    train_text_matrix[counter] = emb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_text_matrix = np.ndarray(shape=(len(test_text), 50,768))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(104, 50, 768)"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test_text_matrix.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Copy each test article's (50, 768) block into its row of the matrix.\n",
    "for counter, emb in enumerate(test_text):\n",
    "    test_text_matrix[counter] = emb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(1, 4096)"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_image[0].shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_image_matrix = np.ndarray(shape=(len(train_image), 4096,1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(381, 4096, 1)"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_image_matrix .shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stack each (1, 4096) VGG vector as a (4096, 1) column into the image matrix.\n",
    "for counter, feat in enumerate(train_image):\n",
    "    train_image_matrix[counter] = feat.reshape(4096, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_image_matrix = np.ndarray(shape=(len(test_image), 4096,1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(104, 4096, 1)"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test_image_matrix.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stack each test image's (1, 4096) VGG vector as a column into the matrix.\n",
    "for counter, feat in enumerate(test_image):\n",
    "    test_image_matrix[counter] = feat.reshape(4096, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Drop the trailing singleton axis: (N, 4096, 1) -> (N, 4096) for the Dense\n",
    "# input. reshape(-1, 4096) generalises the previously hardcoded sample counts\n",
    "# (381 / 104), which would break on any dataset change.\n",
    "train_image_matrix = train_image_matrix.reshape(-1, 4096)\n",
    "test_image_matrix = test_image_matrix.reshape(-1, 4096)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Multimodal model: XLNet text branch + dense layers + VGG image branch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"model_1\"\n",
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_1 (InputLayer)            (None, 50, 768)      0                                            \n",
      "__________________________________________________________________________________________________\n",
      "flatten_1 (Flatten)             (None, 38400)        0           input_1[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "input_2 (InputLayer)            (None, 4096)         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "dense_1 (Dense)                 (None, 1000)         38401000    flatten_1[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "dense_4 (Dense)                 (None, 2000)         8194000     input_2[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense_2 (Dense)                 (None, 500)          500500      dense_1[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense_5 (Dense)                 (None, 1000)         2001000     dense_4[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense_3 (Dense)                 (None, 100)          50100       dense_2[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense_6 (Dense)                 (None, 100)          100100      dense_5[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_1 (BatchNor (None, 100)          400         dense_3[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_2 (BatchNor (None, 100)          400         dense_6[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dropout_1 (Dropout)             (None, 100)          0           batch_normalization_1[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "dropout_2 (Dropout)             (None, 100)          0           batch_normalization_2[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_1 (Concatenate)     (None, 200)          0           dropout_1[0][0]                  \n",
      "                                                                 dropout_2[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "dense_7 (Dense)                 (None, 200)          40200       concatenate_1[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "dense_8 (Dense)                 (None, 100)          20100       dense_7[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense_9 (Dense)                 (None, 50)           5050        dense_8[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dropout_3 (Dropout)             (None, 50)           0           dense_9[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense_10 (Dense)                (None, 2)            102         dropout_3[0][0]                  \n",
      "==================================================================================================\n",
      "Total params: 49,312,952\n",
      "Trainable params: 49,312,552\n",
      "Non-trainable params: 400\n",
      "__________________________________________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "# Two-branch multimodal classifier: flattened (50, 768) text embeddings and\n",
    "# 4096-d VGG features pass through parallel dense stacks, are batch-normed,\n",
    "# dropped out, concatenated, and classified with a 2-way softmax.\n",
    "input_text = Input(shape=(50,768))\n",
    "text_flat = Flatten()(input_text)\n",
    "dense_text = Dense(1000,activation='relu',kernel_regularizer=regularizers.l2(0.01), kernel_initializer=initializers.he_normal(seed=0))(text_flat)\n",
    "#dense_text = Dropout(0.4)(dense_text)\n",
    "dense_text = Dense(500,activation='relu',kernel_regularizer=regularizers.l2(0.01), kernel_initializer=initializers.he_normal(seed=0))(dense_text)\n",
    "#dense_text = Dropout(0.4)(dense_text)\n",
    "dense_text = Dense(100,activation='relu',kernel_regularizer=regularizers.l2(0.01), kernel_initializer=initializers.he_normal(seed=0))(dense_text)\n",
    "dense_text = BatchNormalization()(dense_text)\n",
    "dense_text_drop = Dropout(0.4)(dense_text)\n",
    "\n",
    "input_image = Input(shape=(4096,))\n",
    "dense_image = Dense(2000,activation='relu',kernel_regularizer=regularizers.l2(0.01), kernel_initializer=initializers.he_normal(seed=0))(input_image)\n",
    "#dense_image = Dropout(0.4)(dense_image)\n",
    "dense_image = Dense(1000, activation='relu',kernel_regularizer=regularizers.l2(0.01), kernel_initializer=initializers.he_normal(seed=0))(dense_image)\n",
    "#dense_image = Dropout(0.4)(dense_image)\n",
    "dense_image = Dense(100,activation='relu',kernel_regularizer=regularizers.l2(0.01), kernel_initializer=initializers.he_normal(seed=0))(dense_image)\n",
    "dense_image = BatchNormalization()(dense_image)\n",
    "dense_image_drop = Dropout(0.4)(dense_image)\n",
    "\n",
    "concat = concatenate([dense_text_drop,dense_image_drop])\n",
    "\n",
    "inter1_dense = Dense(200,activation='relu',kernel_regularizer=regularizers.l2(0.01), kernel_initializer=initializers.he_normal(seed=0))(concat)\n",
    "inter1_dense = Dense(100,activation='relu',kernel_regularizer=regularizers.l2(0.01), kernel_initializer=initializers.he_normal(seed=0))(inter1_dense)\n",
    "final_dense = Dense(50,activation='relu',kernel_regularizer=regularizers.l2(0.01), kernel_initializer=initializers.he_normal(seed=0))(inter1_dense)\n",
    "final_dropout = Dropout(0.4)(final_dense)\n",
    "output = Dense(2, activation='softmax')(final_dropout)\n",
    "\n",
    "model = Model(inputs=[input_text,input_image], outputs=output)\n",
    "adam = optimizers.Adam(lr=1e-4)\n",
    "#adagrad = optimizers.Adagrad(lr=1e-4)\n",
    "#adamax = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999)\n",
    "\n",
    "# FIX: this Keras version logs the metric as 'val_accuracy', so monitoring\n",
    "# 'val_acc' made the checkpoint skip saving on every epoch (see the\n",
    "# \"Can save best model only with val_acc available\" RuntimeWarning in the\n",
    "# training output). The best model was therefore never written to disk.\n",
    "checkpoint = ModelCheckpoint(filepath='../checkpoints_polity/dense_MM_model.hdf5', monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')\n",
    "callbacks_list = [checkpoint]\n",
    "\n",
    "#sgd = optimizers.SGD(lr=1e-4, clipnorm=1.)\n",
    "model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 381 samples, validate on 104 samples\n",
      "Epoch 1/100\n",
      "381/381 [==============================] - 2s 5ms/step - loss: 101.8646 - accuracy: 0.4016 - val_loss: 102.1005 - val_accuracy: 0.4904\n",
      "Epoch 2/100\n",
      "160/381 [===========>..................] - ETA: 0s - loss: 99.1613 - accuracy: 0.5813"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/shivangi/.conda/envs/BERT/lib/python3.7/site-packages/keras/callbacks/callbacks.py:707: RuntimeWarning: Can save best model only with val_acc available, skipping.\n",
      "  'skipping.' % (self.monitor), RuntimeWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "381/381 [==============================] - 0s 1ms/step - loss: 98.4342 - accuracy: 0.6115 - val_loss: 98.3001 - val_accuracy: 0.5962\n",
      "Epoch 3/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 95.8346 - accuracy: 0.6824 - val_loss: 95.2432 - val_accuracy: 0.6250\n",
      "Epoch 4/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 93.3287 - accuracy: 0.7559 - val_loss: 92.6074 - val_accuracy: 0.6923\n",
      "Epoch 5/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 90.9521 - accuracy: 0.8058 - val_loss: 90.2007 - val_accuracy: 0.6827\n",
      "Epoch 6/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 88.7045 - accuracy: 0.8478 - val_loss: 87.9439 - val_accuracy: 0.7115\n",
      "Epoch 7/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 86.5274 - accuracy: 0.8609 - val_loss: 85.7526 - val_accuracy: 0.7212\n",
      "Epoch 8/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 84.4128 - accuracy: 0.8819 - val_loss: 83.6416 - val_accuracy: 0.7212\n",
      "Epoch 9/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 82.3701 - accuracy: 0.8845 - val_loss: 81.5687 - val_accuracy: 0.7885\n",
      "Epoch 10/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 80.3458 - accuracy: 0.9186 - val_loss: 79.6085 - val_accuracy: 0.7788\n",
      "Epoch 11/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 78.4003 - accuracy: 0.9108 - val_loss: 77.6945 - val_accuracy: 0.7692\n",
      "Epoch 12/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 76.4964 - accuracy: 0.9370 - val_loss: 75.8198 - val_accuracy: 0.7692\n",
      "Epoch 13/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 74.6710 - accuracy: 0.9239 - val_loss: 74.0128 - val_accuracy: 0.7885\n",
      "Epoch 14/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 72.8317 - accuracy: 0.9606 - val_loss: 72.2475 - val_accuracy: 0.7692\n",
      "Epoch 15/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 71.0792 - accuracy: 0.9449 - val_loss: 70.4646 - val_accuracy: 0.7788\n",
      "Epoch 16/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 69.3413 - accuracy: 0.9423 - val_loss: 68.7574 - val_accuracy: 0.8173\n",
      "Epoch 17/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 67.6791 - accuracy: 0.9501 - val_loss: 67.0852 - val_accuracy: 0.8173\n",
      "Epoch 18/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 66.0699 - accuracy: 0.9396 - val_loss: 65.5155 - val_accuracy: 0.8173\n",
      "Epoch 19/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 64.4303 - accuracy: 0.9633 - val_loss: 64.0389 - val_accuracy: 0.7692\n",
      "Epoch 20/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 62.8831 - accuracy: 0.9685 - val_loss: 62.4630 - val_accuracy: 0.7885\n",
      "Epoch 21/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 61.3672 - accuracy: 0.9633 - val_loss: 60.9846 - val_accuracy: 0.7788\n",
      "Epoch 22/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 59.8964 - accuracy: 0.9764 - val_loss: 59.5528 - val_accuracy: 0.7692\n",
      "Epoch 23/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 58.4341 - accuracy: 0.9790 - val_loss: 58.1106 - val_accuracy: 0.7788\n",
      "Epoch 24/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 57.0505 - accuracy: 0.9738 - val_loss: 56.7395 - val_accuracy: 0.7692\n",
      "Epoch 25/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 55.6630 - accuracy: 0.9816 - val_loss: 55.4178 - val_accuracy: 0.7596\n",
      "Epoch 26/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 54.3205 - accuracy: 0.9790 - val_loss: 54.0724 - val_accuracy: 0.7692\n",
      "Epoch 27/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 53.0315 - accuracy: 0.9633 - val_loss: 52.7866 - val_accuracy: 0.7788\n",
      "Epoch 28/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 51.7434 - accuracy: 0.9869 - val_loss: 51.5358 - val_accuracy: 0.8077\n",
      "Epoch 29/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 50.5134 - accuracy: 0.9790 - val_loss: 50.3013 - val_accuracy: 0.7981\n",
      "Epoch 30/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 49.3007 - accuracy: 0.9816 - val_loss: 49.1038 - val_accuracy: 0.7885\n",
      "Epoch 31/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 48.1149 - accuracy: 0.9843 - val_loss: 47.9341 - val_accuracy: 0.7788\n",
      "Epoch 32/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 46.9567 - accuracy: 0.9790 - val_loss: 46.8077 - val_accuracy: 0.7981\n",
      "Epoch 33/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 45.8297 - accuracy: 0.9921 - val_loss: 45.7243 - val_accuracy: 0.7981\n",
      "Epoch 34/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 44.7383 - accuracy: 0.9921 - val_loss: 44.6364 - val_accuracy: 0.7885\n",
      "Epoch 35/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 43.6823 - accuracy: 0.9843 - val_loss: 43.6678 - val_accuracy: 0.7692\n",
      "Epoch 36/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 42.6366 - accuracy: 0.9921 - val_loss: 42.5670 - val_accuracy: 0.7885\n",
      "Epoch 37/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 41.6171 - accuracy: 0.9869 - val_loss: 41.5668 - val_accuracy: 0.7981\n",
      "Epoch 38/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 40.6222 - accuracy: 0.9974 - val_loss: 40.5879 - val_accuracy: 0.7885\n",
      "Epoch 39/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 39.6587 - accuracy: 0.9921 - val_loss: 39.6753 - val_accuracy: 0.7885\n",
      "Epoch 40/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 38.7248 - accuracy: 0.9948 - val_loss: 38.7760 - val_accuracy: 0.7788\n",
      "Epoch 41/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 37.7991 - accuracy: 0.9974 - val_loss: 37.9042 - val_accuracy: 0.7596\n",
      "Epoch 42/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 36.9166 - accuracy: 0.9869 - val_loss: 36.9882 - val_accuracy: 0.7404\n",
      "Epoch 43/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 36.0484 - accuracy: 0.9921 - val_loss: 36.0865 - val_accuracy: 0.8077\n",
      "Epoch 44/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 35.2017 - accuracy: 0.9843 - val_loss: 35.3025 - val_accuracy: 0.8077\n",
      "Epoch 45/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 34.3675 - accuracy: 0.9921 - val_loss: 34.5543 - val_accuracy: 0.7885\n",
      "Epoch 46/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 33.5742 - accuracy: 0.9895 - val_loss: 33.5974 - val_accuracy: 0.7885\n",
      "Epoch 47/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 32.7940 - accuracy: 0.9869 - val_loss: 32.8599 - val_accuracy: 0.7788\n",
      "Epoch 48/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 32.0343 - accuracy: 0.9921 - val_loss: 32.1744 - val_accuracy: 0.7885\n",
      "Epoch 49/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 31.2935 - accuracy: 0.9895 - val_loss: 31.6096 - val_accuracy: 0.7596\n",
      "Epoch 50/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 30.5859 - accuracy: 0.9816 - val_loss: 30.9742 - val_accuracy: 0.7885\n",
      "Epoch 51/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 29.8763 - accuracy: 0.9869 - val_loss: 30.1265 - val_accuracy: 0.7788\n",
      "Epoch 52/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 29.1913 - accuracy: 0.9869 - val_loss: 29.3199 - val_accuracy: 0.7885\n",
      "Epoch 53/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 28.5285 - accuracy: 0.9948 - val_loss: 28.7357 - val_accuracy: 0.7788\n",
      "Epoch 54/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 27.8624 - accuracy: 0.9895 - val_loss: 28.1105 - val_accuracy: 0.7885\n",
      "Epoch 55/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 27.2175 - accuracy: 0.9948 - val_loss: 27.4909 - val_accuracy: 0.7885\n",
      "Epoch 56/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 26.5944 - accuracy: 0.9921 - val_loss: 26.8687 - val_accuracy: 0.7788\n",
      "Epoch 57/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 25.9753 - accuracy: 0.9974 - val_loss: 26.3184 - val_accuracy: 0.7692\n",
      "Epoch 58/100\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "381/381 [==============================] - 0s 1ms/step - loss: 25.3860 - accuracy: 1.0000 - val_loss: 25.7305 - val_accuracy: 0.7788\n",
      "Epoch 59/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 24.8115 - accuracy: 0.9948 - val_loss: 25.1396 - val_accuracy: 0.8173\n",
      "Epoch 60/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 24.2556 - accuracy: 0.9921 - val_loss: 24.5408 - val_accuracy: 0.8173\n",
      "Epoch 61/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 23.6925 - accuracy: 1.0000 - val_loss: 23.9995 - val_accuracy: 0.7981\n",
      "Epoch 62/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 23.1525 - accuracy: 0.9974 - val_loss: 23.4664 - val_accuracy: 0.8173\n",
      "Epoch 63/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 22.6285 - accuracy: 0.9948 - val_loss: 22.9171 - val_accuracy: 0.7981\n",
      "Epoch 64/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 22.1176 - accuracy: 0.9948 - val_loss: 22.3497 - val_accuracy: 0.8269\n",
      "Epoch 65/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 21.6194 - accuracy: 0.9948 - val_loss: 21.9000 - val_accuracy: 0.8173\n",
      "Epoch 66/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 21.1336 - accuracy: 0.9974 - val_loss: 21.4477 - val_accuracy: 0.7981\n",
      "Epoch 67/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 20.6547 - accuracy: 0.9948 - val_loss: 21.1199 - val_accuracy: 0.7788\n",
      "Epoch 68/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 20.1907 - accuracy: 0.9974 - val_loss: 20.6394 - val_accuracy: 0.7692\n",
      "Epoch 69/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 19.7372 - accuracy: 0.9974 - val_loss: 20.1236 - val_accuracy: 0.8077\n",
      "Epoch 70/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 19.3025 - accuracy: 0.9974 - val_loss: 19.6547 - val_accuracy: 0.8077\n",
      "Epoch 71/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 18.8566 - accuracy: 1.0000 - val_loss: 19.2243 - val_accuracy: 0.7788\n",
      "Epoch 72/100\n",
      "381/381 [==============================] - 1s 2ms/step - loss: 18.4489 - accuracy: 0.9921 - val_loss: 18.8827 - val_accuracy: 0.7788\n",
      "Epoch 73/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 18.0409 - accuracy: 0.9948 - val_loss: 18.4071 - val_accuracy: 0.7981\n",
      "Epoch 74/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 17.6591 - accuracy: 0.9895 - val_loss: 18.1194 - val_accuracy: 0.7404\n",
      "Epoch 75/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 17.2800 - accuracy: 0.9843 - val_loss: 17.6571 - val_accuracy: 0.7885\n",
      "Epoch 76/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 16.8977 - accuracy: 0.9974 - val_loss: 17.2989 - val_accuracy: 0.7885\n",
      "Epoch 77/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 16.5428 - accuracy: 0.9869 - val_loss: 16.8593 - val_accuracy: 0.8173\n",
      "Epoch 78/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 16.1811 - accuracy: 0.9895 - val_loss: 16.7758 - val_accuracy: 0.7308\n",
      "Epoch 79/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 15.8297 - accuracy: 0.9921 - val_loss: 16.3618 - val_accuracy: 0.7885\n",
      "Epoch 80/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 15.4766 - accuracy: 0.9948 - val_loss: 15.8355 - val_accuracy: 0.7788\n",
      "Epoch 81/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 15.1493 - accuracy: 0.9869 - val_loss: 15.4426 - val_accuracy: 0.8173\n",
      "Epoch 82/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 14.8331 - accuracy: 0.9843 - val_loss: 15.2742 - val_accuracy: 0.7596\n",
      "Epoch 83/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 14.5084 - accuracy: 0.9895 - val_loss: 15.0896 - val_accuracy: 0.7500\n",
      "Epoch 84/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 14.1914 - accuracy: 0.9895 - val_loss: 14.6517 - val_accuracy: 0.7692\n",
      "Epoch 85/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 13.8829 - accuracy: 0.9974 - val_loss: 14.4035 - val_accuracy: 0.7788\n",
      "Epoch 86/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 13.5791 - accuracy: 1.0000 - val_loss: 14.3592 - val_accuracy: 0.7404\n",
      "Epoch 87/100\n",
      "381/381 [==============================] - 1s 1ms/step - loss: 13.3026 - accuracy: 0.9921 - val_loss: 13.9398 - val_accuracy: 0.7115\n",
      "Epoch 88/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 13.0379 - accuracy: 0.9921 - val_loss: 14.4473 - val_accuracy: 0.6635\n",
      "Epoch 89/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 12.7428 - accuracy: 0.9921 - val_loss: 13.4779 - val_accuracy: 0.7500\n",
      "Epoch 90/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 12.4654 - accuracy: 1.0000 - val_loss: 13.0167 - val_accuracy: 0.7885\n",
      "Epoch 91/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 12.2006 - accuracy: 1.0000 - val_loss: 12.7334 - val_accuracy: 0.7788\n",
      "Epoch 92/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 11.9513 - accuracy: 0.9948 - val_loss: 12.6302 - val_accuracy: 0.7788\n",
      "Epoch 93/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 11.7058 - accuracy: 0.9869 - val_loss: 12.1966 - val_accuracy: 0.7885\n",
      "Epoch 94/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 11.4637 - accuracy: 0.9869 - val_loss: 11.9221 - val_accuracy: 0.7788\n",
      "Epoch 95/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 11.2314 - accuracy: 0.9895 - val_loss: 11.8814 - val_accuracy: 0.7212\n",
      "Epoch 96/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 11.0193 - accuracy: 0.9869 - val_loss: 11.9599 - val_accuracy: 0.7019\n",
      "Epoch 97/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 10.7879 - accuracy: 0.9948 - val_loss: 11.4033 - val_accuracy: 0.7500\n",
      "Epoch 98/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 10.5597 - accuracy: 1.0000 - val_loss: 11.1641 - val_accuracy: 0.7788\n",
      "Epoch 99/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 10.3564 - accuracy: 0.9895 - val_loss: 10.8826 - val_accuracy: 0.7788\n",
      "Epoch 100/100\n",
      "381/381 [==============================] - 0s 1ms/step - loss: 10.1490 - accuracy: 0.9948 - val_loss: 10.8185 - val_accuracy: 0.7788\n"
     ]
    }
   ],
   "source": [
    "history = model.fit([train_text_matrix, train_image_matrix],train_label,validation_data=([test_text_matrix,test_image_matrix],test_label),batch_size =32,epochs =100,callbacks=callbacks_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('XL_poli_history.json', 'w') as f:\n",
    "    json.dump(str(history.history), f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.plot(history.history['acc'])\n",
    "plt.plot(history.history['val_acc'])\n",
    "plt.title('Model accuracy')\n",
    "plt.ylabel('Accuracy')\n",
    "plt.xlabel('Epoch')\n",
    "plt.legend(['Train', 'Test'], loc='upper left')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.plot(history.history['loss'])\n",
    "plt.plot(history.history['val_loss'])\n",
    "plt.title('')\n",
    "plt.ylabel('Loss')\n",
    "plt.xlabel('Epoch')\n",
    "plt.legend(['Train', 'Test'], loc='upper left')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### XLNET+ dense layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def baseline_model():\n",
    "    model = Sequential()\n",
    "    model.add(Flatten(input_shape=(50,768)))\n",
    "    model.add(Dense(1000, activation='relu', kernel_regularizer=regularizers.l2(0.01)))\n",
    "    #model.add(Dropout(0.4))\n",
    "    model.add(Dense(500, activation='relu', kernel_regularizer=regularizers.l2(0.01)))\n",
    "    #model.add(Dropout(0.4))\n",
    "    model.add(Dense(100, activation='relu', kernel_regularizer=regularizers.l2(0.01)))\n",
    "    #model.add(BatchNormalization())\n",
    "    model.add(Dropout(0.4))\n",
    "    model.add(Dense(2, activation='softmax'))\n",
    "    #adam = optimizers.Adam(lr=1e-4)\n",
    "    #adamax = optimizers.Adamax(learning_rate=0.002, beta_1=0.9, beta_2=0.999)\n",
    "    sgd = optimizers.SGD(lr=0.001, clipnorm=1.)\n",
    "    #adadelta = optimizers.Adadelta(lr=1.0, rho=0.95)\n",
    "    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "checkpoint = ModelCheckpoint(filepath='../checkpoints_polity/dense_Text_model.hdf5', monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n",
    "callbacks_list = [checkpoint]\n",
    "model = baseline_model()\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tempHist = model.fit(train_text_matrix,train_label,validation_data=(test_text_matrix,test_label),batch_size =32,epochs =100,callbacks=callbacks_list)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### XLNET + LSTM "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.models import Sequential\n",
    "from keras.layers import Dense, Dropout, Conv1D,MaxPooling1D,Flatten\n",
    "from keras.layers import LSTM, Bidirectional\n",
    "from keras.optimizers import RMSprop"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def lstm():\n",
    "    model = Sequential()\n",
    "    model.add(LSTM(128,input_shape=(50,768)))\n",
    "    model.add(Dropout(0.5))\n",
    "    model.add(Dense(595, activation='relu'))\n",
    "    model.add(Dense(2, activation='softmax'))\n",
    "    model.compile(loss='categorical_crossentropy',optimizer='sgd',metrics=['accuracy'])\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "checkpoint = ModelCheckpoint(filepath='../checkpoints_polity/lstm_text_model.hdf5', monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n",
    "callbacks_list = [checkpoint]\n",
    "model = lstm()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Print layer-by-layer architecture and parameter counts of the LSTM model.\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "model.fit(train_text_matrix,train_label,validation_data=(test_text_matrix,test_label),batch_size =32,epochs =100, callbacks=callbacks_list)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### XLNET + CNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def cnn_model():\n",
    "    \"\"\"1-D CNN classifier over (50, 768) XLNet embedding matrices.\n",
    "\n",
    "    Two Conv1D/MaxPooling1D/Dropout stages, then dense layers to a 2-way\n",
    "    softmax; compiled with SGD and categorical cross-entropy.\n",
    "    \"\"\"\n",
    "    model = Sequential()\n",
    "    # NOTE(review): data_format='channels_first' makes Conv1D treat the 50 axis\n",
    "    # as channels and 768 as steps, while MaxPooling1D below uses its default\n",
    "    # (channels_last) layout — confirm this axis mixing is intentional.\n",
    "    model.add(Conv1D(filters=3, kernel_size=5, activation='relu',data_format='channels_first' , input_shape=(50,768)))\n",
    "    model.add(MaxPooling1D(pool_size=2))\n",
    "    model.add(Dropout(0.5))\n",
    "    model.add(Conv1D(filters=3, kernel_size=5, activation='relu',data_format='channels_first' ))\n",
    "    model.add(MaxPooling1D(pool_size=2))\n",
    "    model.add(Dropout(0.5))\n",
    "    model.add(Flatten())\n",
    "    model.add(Dense(500, activation='relu'))\n",
    "    model.add(Dropout(0.5))\n",
    "    model.add(Dense(100, activation='relu'))\n",
    "    model.add(Dropout(0.5))\n",
    "    model.add(Dense(2, activation='softmax'))\n",
    "    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model=cnn_model()\n",
    "checkpoint = ModelCheckpoint(filepath='../checkpoints_polity/cnn_text_model.hdf5', monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n",
    "callbacks_list = [checkpoint]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Print layer-by-layer architecture and parameter counts of the CNN model.\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model.fit(train_text_matrix,train_label,validation_data=(test_text_matrix,test_label),batch_size =32,epochs =100, callbacks=callbacks_list)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python (BERT)",
   "language": "python",
   "name": "bert"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
