{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "unmasked samples:  100.0\n",
      "Epoch 1/20\n",
      "96/96 [==============================] - 2s 10ms/step - loss: 3.6431 - Bandwidth_loss: 1.0125 - Duration_loss: 1.0341 - Class_loss: 1.5964 - Bandwidth_accuracy: 0.5864 - Duration_accuracy: 0.5550 - Class_accuracy: 0.3000 - val_loss: 2.9860 - val_Bandwidth_loss: 0.8749 - val_Duration_loss: 0.9440 - val_Class_loss: 1.1671 - val_Bandwidth_accuracy: 0.6067 - val_Duration_accuracy: 0.5533 - val_Class_accuracy: 0.3333\n",
      "Epoch 2/20\n",
      "96/96 [==============================] - 1s 9ms/step - loss: 2.9568 - Bandwidth_loss: 0.6538 - Duration_loss: 0.7324 - Class_loss: 1.5705 - Bandwidth_accuracy: 0.7203 - Duration_accuracy: 0.7021 - Class_accuracy: 0.3075 - val_loss: 2.4065 - val_Bandwidth_loss: 0.7121 - val_Duration_loss: 0.7646 - val_Class_loss: 0.9298 - val_Bandwidth_accuracy: 0.7133 - val_Duration_accuracy: 0.7333 - val_Class_accuracy: 0.6533\n",
      "Epoch 3/20\n",
      "96/96 [==============================] - 1s 9ms/step - loss: 2.6928 - Bandwidth_loss: 0.5305 - Duration_loss: 0.6103 - Class_loss: 1.5520 - Bandwidth_accuracy: 0.7812 - Duration_accuracy: 0.7431 - Class_accuracy: 0.3115 - val_loss: 2.1933 - val_Bandwidth_loss: 0.7647 - val_Duration_loss: 0.6852 - val_Class_loss: 0.7434 - val_Bandwidth_accuracy: 0.7067 - val_Duration_accuracy: 0.7600 - val_Class_accuracy: 0.7133\n",
      "Epoch 4/20\n",
      "96/96 [==============================] - 1s 9ms/step - loss: 2.5253 - Bandwidth_loss: 0.4521 - Duration_loss: 0.5339 - Class_loss: 1.5392 - Bandwidth_accuracy: 0.8172 - Duration_accuracy: 0.7752 - Class_accuracy: 0.3121 - val_loss: 1.7669 - val_Bandwidth_loss: 0.5690 - val_Duration_loss: 0.6132 - val_Class_loss: 0.5847 - val_Bandwidth_accuracy: 0.8000 - val_Duration_accuracy: 0.7600 - val_Class_accuracy: 0.7800\n",
      "Epoch 5/20\n",
      "96/96 [==============================] - 1s 10ms/step - loss: 2.4765 - Bandwidth_loss: 0.4331 - Duration_loss: 0.5128 - Class_loss: 1.5306 - Bandwidth_accuracy: 0.8194 - Duration_accuracy: 0.7765 - Class_accuracy: 0.3131 - val_loss: 1.8159 - val_Bandwidth_loss: 0.5708 - val_Duration_loss: 0.6510 - val_Class_loss: 0.5940 - val_Bandwidth_accuracy: 0.7867 - val_Duration_accuracy: 0.7267 - val_Class_accuracy: 0.8200\n",
      "Epoch 6/20\n",
      "96/96 [==============================] - 1s 9ms/step - loss: 2.3765 - Bandwidth_loss: 0.3741 - Duration_loss: 0.4783 - Class_loss: 1.5241 - Bandwidth_accuracy: 0.8448 - Duration_accuracy: 0.7861 - Class_accuracy: 0.3134 - val_loss: 1.7229 - val_Bandwidth_loss: 0.5298 - val_Duration_loss: 0.6299 - val_Class_loss: 0.5631 - val_Bandwidth_accuracy: 0.8133 - val_Duration_accuracy: 0.7667 - val_Class_accuracy: 0.8133\n",
      "Epoch 7/20\n",
      "96/96 [==============================] - 1s 9ms/step - loss: 2.3317 - Bandwidth_loss: 0.3572 - Duration_loss: 0.4543 - Class_loss: 1.5202 - Bandwidth_accuracy: 0.8547 - Duration_accuracy: 0.7943 - Class_accuracy: 0.3137 - val_loss: 1.8171 - val_Bandwidth_loss: 0.6019 - val_Duration_loss: 0.6208 - val_Class_loss: 0.5943 - val_Bandwidth_accuracy: 0.7867 - val_Duration_accuracy: 0.7467 - val_Class_accuracy: 0.7933\n",
      "Epoch 8/20\n",
      "96/96 [==============================] - 1s 9ms/step - loss: 2.2900 - Bandwidth_loss: 0.3353 - Duration_loss: 0.4378 - Class_loss: 1.5169 - Bandwidth_accuracy: 0.8615 - Duration_accuracy: 0.8008 - Class_accuracy: 0.3141 - val_loss: 1.4283 - val_Bandwidth_loss: 0.4558 - val_Duration_loss: 0.5373 - val_Class_loss: 0.4352 - val_Bandwidth_accuracy: 0.8200 - val_Duration_accuracy: 0.7800 - val_Class_accuracy: 0.8533\n",
      "Epoch 9/20\n",
      "96/96 [==============================] - 1s 10ms/step - loss: 2.2251 - Bandwidth_loss: 0.2942 - Duration_loss: 0.4166 - Class_loss: 1.5144 - Bandwidth_accuracy: 0.8782 - Duration_accuracy: 0.8063 - Class_accuracy: 0.3145 - val_loss: 1.4604 - val_Bandwidth_loss: 0.4842 - val_Duration_loss: 0.5541 - val_Class_loss: 0.4221 - val_Bandwidth_accuracy: 0.8200 - val_Duration_accuracy: 0.8067 - val_Class_accuracy: 0.8667\n",
      "Epoch 10/20\n",
      "96/96 [==============================] - 1s 10ms/step - loss: 2.1798 - Bandwidth_loss: 0.2700 - Duration_loss: 0.3967 - Class_loss: 1.5131 - Bandwidth_accuracy: 0.8863 - Duration_accuracy: 0.8137 - Class_accuracy: 0.3145 - val_loss: 1.2007 - val_Bandwidth_loss: 0.3698 - val_Duration_loss: 0.4820 - val_Class_loss: 0.3489 - val_Bandwidth_accuracy: 0.8600 - val_Duration_accuracy: 0.8400 - val_Class_accuracy: 0.8867\n",
      "Epoch 11/20\n",
      "96/96 [==============================] - 1s 10ms/step - loss: 2.1309 - Bandwidth_loss: 0.2436 - Duration_loss: 0.3753 - Class_loss: 1.5119 - Bandwidth_accuracy: 0.8985 - Duration_accuracy: 0.8234 - Class_accuracy: 0.3145 - val_loss: 1.1801 - val_Bandwidth_loss: 0.3698 - val_Duration_loss: 0.4873 - val_Class_loss: 0.3230 - val_Bandwidth_accuracy: 0.8333 - val_Duration_accuracy: 0.7933 - val_Class_accuracy: 0.8933\n",
      "Epoch 12/20\n",
      "96/96 [==============================] - 1s 10ms/step - loss: 2.1154 - Bandwidth_loss: 0.2346 - Duration_loss: 0.3700 - Class_loss: 1.5108 - Bandwidth_accuracy: 0.9049 - Duration_accuracy: 0.8226 - Class_accuracy: 0.3147 - val_loss: 1.2009 - val_Bandwidth_loss: 0.3857 - val_Duration_loss: 0.4998 - val_Class_loss: 0.3153 - val_Bandwidth_accuracy: 0.8333 - val_Duration_accuracy: 0.7733 - val_Class_accuracy: 0.8933\n",
      "Epoch 13/20\n",
      "96/96 [==============================] - 1s 9ms/step - loss: 2.0907 - Bandwidth_loss: 0.2204 - Duration_loss: 0.3601 - Class_loss: 1.5102 - Bandwidth_accuracy: 0.9041 - Duration_accuracy: 0.8267 - Class_accuracy: 0.3147 - val_loss: 1.2306 - val_Bandwidth_loss: 0.4028 - val_Duration_loss: 0.5040 - val_Class_loss: 0.3239 - val_Bandwidth_accuracy: 0.8467 - val_Duration_accuracy: 0.8267 - val_Class_accuracy: 0.8867\n",
      "Epoch 14/20\n",
      "96/96 [==============================] - 1s 9ms/step - loss: 2.0725 - Bandwidth_loss: 0.2128 - Duration_loss: 0.3500 - Class_loss: 1.5097 - Bandwidth_accuracy: 0.9094 - Duration_accuracy: 0.8324 - Class_accuracy: 0.3149 - val_loss: 1.3259 - val_Bandwidth_loss: 0.4359 - val_Duration_loss: 0.5520 - val_Class_loss: 0.3379 - val_Bandwidth_accuracy: 0.8333 - val_Duration_accuracy: 0.8133 - val_Class_accuracy: 0.8867\n",
      "Epoch 15/20\n",
      "96/96 [==============================] - 1s 10ms/step - loss: 2.0517 - Bandwidth_loss: 0.2028 - Duration_loss: 0.3396 - Class_loss: 1.5093 - Bandwidth_accuracy: 0.9151 - Duration_accuracy: 0.8347 - Class_accuracy: 0.3149 - val_loss: 1.1702 - val_Bandwidth_loss: 0.3836 - val_Duration_loss: 0.4898 - val_Class_loss: 0.2968 - val_Bandwidth_accuracy: 0.8467 - val_Duration_accuracy: 0.8333 - val_Class_accuracy: 0.8933\n",
      "Epoch 16/20\n",
      "96/96 [==============================] - 1s 10ms/step - loss: 2.0462 - Bandwidth_loss: 0.1966 - Duration_loss: 0.3403 - Class_loss: 1.5093 - Bandwidth_accuracy: 0.9151 - Duration_accuracy: 0.8330 - Class_accuracy: 0.3149 - val_loss: 1.2447 - val_Bandwidth_loss: 0.3913 - val_Duration_loss: 0.5355 - val_Class_loss: 0.3179 - val_Bandwidth_accuracy: 0.8600 - val_Duration_accuracy: 0.8267 - val_Class_accuracy: 0.9067\n",
      "Epoch 17/20\n",
      "96/96 [==============================] - 1s 9ms/step - loss: 2.0278 - Bandwidth_loss: 0.1925 - Duration_loss: 0.3263 - Class_loss: 1.5090 - Bandwidth_accuracy: 0.9163 - Duration_accuracy: 0.8400 - Class_accuracy: 0.3149 - val_loss: 1.3067 - val_Bandwidth_loss: 0.4098 - val_Duration_loss: 0.5529 - val_Class_loss: 0.3440 - val_Bandwidth_accuracy: 0.8400 - val_Duration_accuracy: 0.7933 - val_Class_accuracy: 0.9067\n",
      "Epoch 18/20\n",
      "96/96 [==============================] - 1s 9ms/step - loss: 2.0210 - Bandwidth_loss: 0.1885 - Duration_loss: 0.3239 - Class_loss: 1.5087 - Bandwidth_accuracy: 0.9150 - Duration_accuracy: 0.8384 - Class_accuracy: 0.3150 - val_loss: 1.3378 - val_Bandwidth_loss: 0.4743 - val_Duration_loss: 0.5210 - val_Class_loss: 0.3425 - val_Bandwidth_accuracy: 0.8467 - val_Duration_accuracy: 0.7933 - val_Class_accuracy: 0.8800\n",
      "Epoch 19/20\n",
      "96/96 [==============================] - 1s 10ms/step - loss: 1.9871 - Bandwidth_loss: 0.1755 - Duration_loss: 0.3031 - Class_loss: 1.5085 - Bandwidth_accuracy: 0.9215 - Duration_accuracy: 0.8498 - Class_accuracy: 0.3150 - val_loss: 1.4957 - val_Bandwidth_loss: 0.5191 - val_Duration_loss: 0.5889 - val_Class_loss: 0.3876 - val_Bandwidth_accuracy: 0.8533 - val_Duration_accuracy: 0.8067 - val_Class_accuracy: 0.9000\n",
      "Epoch 20/20\n",
      "96/96 [==============================] - 1s 10ms/step - loss: 1.9757 - Bandwidth_loss: 0.1712 - Duration_loss: 0.2958 - Class_loss: 1.5087 - Bandwidth_accuracy: 0.9246 - Duration_accuracy: 0.8534 - Class_accuracy: 0.3147 - val_loss: 1.5014 - val_Bandwidth_loss: 0.4988 - val_Duration_loss: 0.5911 - val_Class_loss: 0.4116 - val_Bandwidth_accuracy: 0.8600 - val_Duration_accuracy: 0.8133 - val_Class_accuracy: 0.8800\n",
      "5/5 [==============================] - 0s 3ms/step - loss: 1.3313 - Bandwidth_loss: 0.3107 - Duration_loss: 0.6861 - Class_loss: 0.3345 - Bandwidth_accuracy: 0.9267 - Duration_accuracy: 0.7600 - Class_accuracy: 0.9133\n",
      "[1.3313469886779785, 0.3106950521469116, 0.6861070990562439, 0.3345448672771454, 0.9266666769981384, 0.7599999904632568, 0.9133333563804626]\n"
     ]
    }
   ],
   "source": [
    "#cnn\n",
    "import numpy as np\n",
    "from keras.models import Model\n",
    "from keras.layers import Dense\n",
    "from keras.layers import multiply\n",
    "from keras.layers import Flatten\n",
    "from keras.layers import Input\n",
    "from keras.layers.convolutional import Conv1D, MaxPooling1D\n",
    "from keras.layers import Activation\n",
    "from keras.optimizers import Adam\n",
    "\n",
    "timestep = 60\n",
    "np.random.seed(10)\n",
    "\n",
    "num_class = 5\n",
    "train_sample_per_class = 20\n",
    "lambda_value = 1\n",
    "\n",
    "\n",
    "trainData = np.load(\"trainData.npy\")\n",
    "trainlabel = np.load(\"trainLabel.npy\")\n",
    "# trainData = trainData[:, -timestep*2:]\n",
    "# trainlabel = trainlabel[:, -timestep*2:]\n",
    "trainData = trainData[:, :timestep*2]\n",
    "trainlabel = trainlabel[:, :timestep*2]\n",
    "trainlabel = trainlabel.astype(int)\n",
    "\n",
    "trainmask = np.zeros((trainlabel.shape[0],256))\n",
    "\n",
    "class_counter = np.zeros((num_class))\n",
    "train_size = trainlabel.shape[0]\n",
    "j = 0\n",
    "for i in range(train_size):\n",
    "    class_id = trainlabel[i,2] - 1\n",
    "    if class_counter[class_id] < train_sample_per_class:\n",
    "        trainmask[i, :] = 1\n",
    "        j += 1\n",
    "        class_counter[class_id] += 1\n",
    "print(\"unmasked samples: \", str(np.sum(trainmask==1)/256))\n",
    "\n",
    "\n",
    "valData = np.load(\"valData.npy\")\n",
    "valLabel = np.load(\"valLabel.npy\")\n",
    "# testData = testData[:, -timestep*2:]\n",
    "# testLabel = testLabel[:, -timestep*2:]\n",
    "valData = valData[:, :timestep*2]\n",
    "valLabel = valLabel[:, :timestep*2]\n",
    "\n",
    "valLabel = valLabel.astype(int)\n",
    "valmask = np.ones((valLabel.shape[0], 256))\n",
    "valmask[:,:]=1\n",
    "\n",
    "\n",
    "testData = np.load(\"testData.npy\")\n",
    "testLabel = np.load(\"testLabel.npy\")\n",
    "# testData = testData[:, -timestep*2:]\n",
    "# testLabel = testLabel[:, -timestep*2:]\n",
    "testData = testData[:, :timestep*2]\n",
    "testLabel = testLabel[:, :timestep*2]\n",
    "\n",
    "testLabel = testLabel.astype(int)\n",
    "testmask = np.ones((testLabel.shape[0], 256))\n",
    "testmask[:,:]=1\n",
    "\n",
    "def categorize_labels(labels):\n",
    "    \"\"\"Bin raw Bandwidth (col 0) and Duration (col 1) labels in place.\n",
    "\n",
    "    Bandwidth -> 1..5 with edges [10000, 50000, 100000, 1000000];\n",
    "    Duration  -> 1..4 with edges [10, 30, 60].\n",
    "    Equivalent to the previous per-row if/elif ladders.\n",
    "    \"\"\"\n",
    "    # np.digitize returns the 0-based bin index, hence the +1.\n",
    "    labels[:, 0] = np.digitize(labels[:, 0], [10000, 50000, 100000, 1000000]) + 1\n",
    "    labels[:, 1] = np.digitize(labels[:, 1], [10, 30, 60]) + 1\n",
    "\n",
    "# Same binning for all three splits (was three copy-pasted loops).\n",
    "categorize_labels(trainlabel)\n",
    "categorize_labels(valLabel)\n",
    "categorize_labels(testLabel)\n",
    "\n",
    "\n",
    "def one_hot(values, num_classes):\n",
    "    \"\"\"One-hot encode 1-based integer labels into an (n, num_classes) array.\"\"\"\n",
    "    n = values.shape[0]\n",
    "    out = np.zeros((n, num_classes))\n",
    "    out[np.arange(n), values - 1] = 1\n",
    "    return out\n",
    "\n",
    "# Targets: Bandwidth (5 classes), Duration (4), Class (5) per split\n",
    "# (was nine copy-pasted zeros/scatter blocks).\n",
    "train_size = trainlabel.shape[0]\n",
    "Y_train1 = one_hot(trainlabel[:, 0], 5)\n",
    "Y_train2 = one_hot(trainlabel[:, 1], 4)\n",
    "Y_train3 = one_hot(trainlabel[:, 2], 5)\n",
    "\n",
    "val_size = valLabel.shape[0]\n",
    "Y_val1 = one_hot(valLabel[:, 0], 5)\n",
    "Y_val2 = one_hot(valLabel[:, 1], 4)\n",
    "Y_val3 = one_hot(valLabel[:, 2], 5)\n",
    "\n",
    "test_size = testLabel.shape[0]\n",
    "Y_test1 = one_hot(testLabel[:, 0], 5)\n",
    "Y_test2 = one_hot(testLabel[:, 1], 4)\n",
    "Y_test3 = one_hot(testLabel[:, 2], 5)\n",
    "\n",
    "# (N, timestep*2) flat sequences -> (N, timestep, 2) for Conv1D input.\n",
    "trainData = trainData.reshape((trainData.shape[0], timestep, 2))\n",
    "testData = testData.reshape((testData.shape[0], timestep, 2))\n",
    "valData = valData.reshape((valData.shape[0], timestep, 2))\n",
    "\n",
    "def base_model():\n",
    "    \"\"\"Build the multi-task CNN.\n",
    "\n",
    "    Two inputs: the (timestep, 2) series and a 256-d mask that gates the\n",
    "    shared features feeding the Class head. Three softmax outputs:\n",
    "    Bandwidth (5-way), Duration (4-way), Class (5-way).\n",
    "    \"\"\"\n",
    "    model_input = Input(shape=(timestep, 2))\n",
    "    mask_input = Input(shape=(256,))\n",
    "\n",
    "    # Three conv stages (32 -> 64 -> 128 filters), each: two width-3\n",
    "    # convolutions followed by 2x max-pooling.\n",
    "    x = model_input\n",
    "    for filters in (32, 64, 128):\n",
    "        x = Conv1D(filters, 3, activation='relu')(x)\n",
    "        x = Conv1D(filters, 3, activation='relu')(x)\n",
    "        x = MaxPooling1D(pool_size=2)(x)\n",
    "\n",
    "    x = Flatten()(x)\n",
    "\n",
    "    # Shared fully-connected trunk: two 256-unit ReLU layers.\n",
    "    for _ in range(2):\n",
    "        x = Dense(256)(x)\n",
    "        x = Activation('relu')(x)\n",
    "\n",
    "    output1 = Dense(5, activation='softmax', name='Bandwidth')(x)\n",
    "    output2 = Dense(4, activation='softmax', name='Duration')(x)\n",
    "\n",
    "    # Zero the features of masked-out samples before the Class head.\n",
    "    masked = multiply([x, mask_input])\n",
    "    output3 = Dense(5, activation='softmax', name='Class')(masked)\n",
    "\n",
    "    model = Model(inputs=[model_input, mask_input], outputs=[output1, output2, output3])\n",
    "    opt = Adam(clipnorm=1.)\n",
    "    model.compile(loss=['categorical_crossentropy'] * 3,\n",
    "                  loss_weights=[1, 1, lambda_value],\n",
    "                  optimizer=opt,\n",
    "                  metrics=['accuracy'])\n",
    "\n",
    "    return model\n",
    "\n",
    "model = base_model()\n",
    "\n",
    "# Train with the semi-supervised mask on the Class head; validation uses an\n",
    "# all-ones mask, so Class accuracy there covers every sample.\n",
    "model.fit([trainData,trainmask], [Y_train1, Y_train2, Y_train3],\n",
    "          validation_data = ([valData, valmask], [Y_val1, Y_val2, Y_val3]),\n",
    "          batch_size = 64, epochs = 20, verbose = True, shuffle = True)\n",
    "\n",
    "# evaluate() returns [total loss, the three per-output losses, then the\n",
    "# three per-output accuracies] in compile order.\n",
    "result = model.evaluate([testData, testmask], [Y_test1, Y_test2, Y_test3])\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "unmasked samples:  100.0\n",
      "Epoch 1/20\n",
      "96/96 [==============================] - 19s 171ms/step - loss: 4.1177 - Bandwidth_loss: 1.2538 - Duration_loss: 1.2676 - Class_loss: 1.5963 - Bandwidth_accuracy: 0.5165 - Duration_accuracy: 0.3629 - Class_accuracy: 0.2974 - val_loss: 4.0947 - val_Bandwidth_loss: 1.3006 - val_Duration_loss: 1.3251 - val_Class_loss: 1.4690 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3733 - val_Class_accuracy: 0.2800\n",
      "Epoch 2/20\n",
      "96/96 [==============================] - 17s 178ms/step - loss: 4.0256 - Bandwidth_loss: 1.2106 - Duration_loss: 1.2381 - Class_loss: 1.5769 - Bandwidth_accuracy: 0.5340 - Duration_accuracy: 0.3861 - Class_accuracy: 0.3033 - val_loss: 4.1979 - val_Bandwidth_loss: 1.3372 - val_Duration_loss: 1.3790 - val_Class_loss: 1.4817 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3667 - val_Class_accuracy: 0.2400\n",
      "Epoch 3/20\n",
      "96/96 [==============================] - 19s 193ms/step - loss: 4.1752 - Bandwidth_loss: 1.3077 - Duration_loss: 1.3032 - Class_loss: 1.5642 - Bandwidth_accuracy: 0.4963 - Duration_accuracy: 0.3496 - Class_accuracy: 0.3028 - val_loss: 4.3638 - val_Bandwidth_loss: 1.4127 - val_Duration_loss: 1.3992 - val_Class_loss: 1.5520 - val_Bandwidth_accuracy: 0.3600 - val_Duration_accuracy: 0.2133 - val_Class_accuracy: 0.2867\n",
      "Epoch 4/20\n",
      "96/96 [==============================] - 19s 197ms/step - loss: 4.0108 - Bandwidth_loss: 1.2111 - Duration_loss: 1.2458 - Class_loss: 1.5538 - Bandwidth_accuracy: 0.5291 - Duration_accuracy: 0.3764 - Class_accuracy: 0.3038 - val_loss: 4.1092 - val_Bandwidth_loss: 1.3122 - val_Duration_loss: 1.3425 - val_Class_loss: 1.4545 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3733 - val_Class_accuracy: 0.2867\n",
      "Epoch 5/20\n",
      "96/96 [==============================] - 19s 197ms/step - loss: 3.9849 - Bandwidth_loss: 1.2009 - Duration_loss: 1.2367 - Class_loss: 1.5473 - Bandwidth_accuracy: 0.5356 - Duration_accuracy: 0.3791 - Class_accuracy: 0.3030 - val_loss: 4.0727 - val_Bandwidth_loss: 1.2964 - val_Duration_loss: 1.3182 - val_Class_loss: 1.4581 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3733 - val_Class_accuracy: 0.2867\n",
      "Epoch 6/20\n",
      "96/96 [==============================] - 19s 193ms/step - loss: 3.9787 - Bandwidth_loss: 1.1989 - Duration_loss: 1.2372 - Class_loss: 1.5426 - Bandwidth_accuracy: 0.5358 - Duration_accuracy: 0.3833 - Class_accuracy: 0.3038 - val_loss: 4.0989 - val_Bandwidth_loss: 1.3116 - val_Duration_loss: 1.3334 - val_Class_loss: 1.4539 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3733 - val_Class_accuracy: 0.2867\n",
      "Epoch 7/20\n",
      "96/96 [==============================] - 18s 191ms/step - loss: 3.9765 - Bandwidth_loss: 1.1994 - Duration_loss: 1.2379 - Class_loss: 1.5392 - Bandwidth_accuracy: 0.5353 - Duration_accuracy: 0.3782 - Class_accuracy: 0.3033 - val_loss: 4.0840 - val_Bandwidth_loss: 1.3033 - val_Duration_loss: 1.3270 - val_Class_loss: 1.4538 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3733 - val_Class_accuracy: 0.2867\n",
      "Epoch 8/20\n",
      "96/96 [==============================] - 18s 191ms/step - loss: 3.9708 - Bandwidth_loss: 1.1994 - Duration_loss: 1.2345 - Class_loss: 1.5369 - Bandwidth_accuracy: 0.5358 - Duration_accuracy: 0.3836 - Class_accuracy: 0.3027 - val_loss: 4.1041 - val_Bandwidth_loss: 1.3094 - val_Duration_loss: 1.3369 - val_Class_loss: 1.4578 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.2867 - val_Class_accuracy: 0.2867\n",
      "Epoch 9/20\n",
      "96/96 [==============================] - 19s 193ms/step - loss: 3.9957 - Bandwidth_loss: 1.2115 - Duration_loss: 1.2486 - Class_loss: 1.5357 - Bandwidth_accuracy: 0.5333 - Duration_accuracy: 0.3761 - Class_accuracy: 0.3025 - val_loss: 4.1770 - val_Bandwidth_loss: 1.3338 - val_Duration_loss: 1.3840 - val_Class_loss: 1.4591 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3733 - val_Class_accuracy: 0.2867\n",
      "Epoch 10/20\n",
      "96/96 [==============================] - 19s 194ms/step - loss: 4.0245 - Bandwidth_loss: 1.2338 - Duration_loss: 1.2564 - Class_loss: 1.5343 - Bandwidth_accuracy: 0.5214 - Duration_accuracy: 0.3725 - Class_accuracy: 0.3033 - val_loss: 4.0639 - val_Bandwidth_loss: 1.2885 - val_Duration_loss: 1.3231 - val_Class_loss: 1.4523 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3733 - val_Class_accuracy: 0.2867\n",
      "Epoch 11/20\n",
      "96/96 [==============================] - 20s 208ms/step - loss: 3.9778 - Bandwidth_loss: 1.2048 - Duration_loss: 1.2396 - Class_loss: 1.5334 - Bandwidth_accuracy: 0.5351 - Duration_accuracy: 0.3781 - Class_accuracy: 0.3038 - val_loss: 4.0956 - val_Bandwidth_loss: 1.3048 - val_Duration_loss: 1.3372 - val_Class_loss: 1.4536 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3733 - val_Class_accuracy: 0.2867\n",
      "Epoch 12/20\n",
      "96/96 [==============================] - 21s 216ms/step - loss: 3.9994 - Bandwidth_loss: 1.2191 - Duration_loss: 1.2477 - Class_loss: 1.5326 - Bandwidth_accuracy: 0.5284 - Duration_accuracy: 0.3831 - Class_accuracy: 0.3036 - val_loss: 4.0749 - val_Bandwidth_loss: 1.2936 - val_Duration_loss: 1.3285 - val_Class_loss: 1.4527 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3733 - val_Class_accuracy: 0.2867\n",
      "Epoch 13/20\n",
      "96/96 [==============================] - 19s 200ms/step - loss: 3.9709 - Bandwidth_loss: 1.2022 - Duration_loss: 1.2365 - Class_loss: 1.5322 - Bandwidth_accuracy: 0.5359 - Duration_accuracy: 0.3874 - Class_accuracy: 0.3027 - val_loss: 4.0746 - val_Bandwidth_loss: 1.3033 - val_Duration_loss: 1.3186 - val_Class_loss: 1.4527 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3733 - val_Class_accuracy: 0.2867\n",
      "Epoch 14/20\n",
      "96/96 [==============================] - 19s 195ms/step - loss: 3.9653 - Bandwidth_loss: 1.1984 - Duration_loss: 1.2350 - Class_loss: 1.5319 - Bandwidth_accuracy: 0.5354 - Duration_accuracy: 0.3887 - Class_accuracy: 0.3033 - val_loss: 4.0754 - val_Bandwidth_loss: 1.2994 - val_Duration_loss: 1.3214 - val_Class_loss: 1.4546 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3733 - val_Class_accuracy: 0.2867\n",
      "Epoch 15/20\n",
      "96/96 [==============================] - 19s 193ms/step - loss: 3.9650 - Bandwidth_loss: 1.1989 - Duration_loss: 1.2345 - Class_loss: 1.5316 - Bandwidth_accuracy: 0.5358 - Duration_accuracy: 0.3883 - Class_accuracy: 0.3035 - val_loss: 4.1065 - val_Bandwidth_loss: 1.3166 - val_Duration_loss: 1.3377 - val_Class_loss: 1.4523 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3733 - val_Class_accuracy: 0.2867\n",
      "Epoch 16/20\n",
      "96/96 [==============================] - 19s 194ms/step - loss: 3.9612 - Bandwidth_loss: 1.1970 - Duration_loss: 1.2327 - Class_loss: 1.5315 - Bandwidth_accuracy: 0.5358 - Duration_accuracy: 0.3932 - Class_accuracy: 0.3031 - val_loss: 4.1019 - val_Bandwidth_loss: 1.3196 - val_Duration_loss: 1.3364 - val_Class_loss: 1.4459 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.2867 - val_Class_accuracy: 0.2800\n",
      "Epoch 17/20\n",
      "96/96 [==============================] - 18s 191ms/step - loss: 3.7226 - Bandwidth_loss: 1.0657 - Duration_loss: 1.1275 - Class_loss: 1.5294 - Bandwidth_accuracy: 0.5382 - Duration_accuracy: 0.5222 - Class_accuracy: 0.3043 - val_loss: 3.8900 - val_Bandwidth_loss: 1.2178 - val_Duration_loss: 1.3102 - val_Class_loss: 1.3620 - val_Bandwidth_accuracy: 0.4600 - val_Duration_accuracy: 0.4267 - val_Class_accuracy: 0.3667\n",
      "Epoch 18/20\n",
      "96/96 [==============================] - 19s 195ms/step - loss: 4.3278 - Bandwidth_loss: 1.4383 - Duration_loss: 1.3562 - Class_loss: 1.5333 - Bandwidth_accuracy: 0.3976 - Duration_accuracy: 0.3108 - Class_accuracy: 0.3023 - val_loss: 4.4875 - val_Bandwidth_loss: 1.4710 - val_Duration_loss: 1.3996 - val_Class_loss: 1.6169 - val_Bandwidth_accuracy: 0.3600 - val_Duration_accuracy: 0.2933 - val_Class_accuracy: 0.2000\n",
      "Epoch 19/20\n",
      "96/96 [==============================] - 19s 194ms/step - loss: 4.3400 - Bandwidth_loss: 1.4407 - Duration_loss: 1.3649 - Class_loss: 1.5344 - Bandwidth_accuracy: 0.3914 - Duration_accuracy: 0.3062 - Class_accuracy: 0.3022 - val_loss: 4.5254 - val_Bandwidth_loss: 1.5009 - val_Duration_loss: 1.4104 - val_Class_loss: 1.6141 - val_Bandwidth_accuracy: 0.3600 - val_Duration_accuracy: 0.2133 - val_Class_accuracy: 0.2000\n",
      "Epoch 20/20\n",
      "96/96 [==============================] - 18s 190ms/step - loss: 4.3371 - Bandwidth_loss: 1.4394 - Duration_loss: 1.3633 - Class_loss: 1.5344 - Bandwidth_accuracy: 0.3914 - Duration_accuracy: 0.3115 - Class_accuracy: 0.3014 - val_loss: 4.4917 - val_Bandwidth_loss: 1.4797 - val_Duration_loss: 1.3994 - val_Class_loss: 1.6126 - val_Bandwidth_accuracy: 0.3600 - val_Duration_accuracy: 0.2133 - val_Class_accuracy: 0.2000\n",
      "5/5 [==============================] - 0s 21ms/step - loss: 4.4894 - Bandwidth_loss: 1.4968 - Duration_loss: 1.3800 - Class_loss: 1.6126 - Bandwidth_accuracy: 0.3400 - Duration_accuracy: 0.2333 - Class_accuracy: 0.2000\n",
      "[4.4893646240234375, 1.4967787265777588, 1.380035161972046, 1.6125506162643433, 0.3400000035762787, 0.23333333432674408, 0.20000000298023224]\n"
     ]
    }
   ],
   "source": [
    "# LSTM variant: preprocessing is intentionally identical to the CNN cell\n",
    "# above; only the model architecture differs. (Ideally the shared pipeline\n",
    "# would live in one function/module reused by both cells.)\n",
    "import numpy as np\n",
    "from keras.models import Model\n",
    "from keras.layers import Dense, LSTM, multiply, Flatten, Input\n",
    "from keras.layers import Activation\n",
    "from keras.optimizers import Adam\n",
    "\n",
    "timestep = 60\n",
    "np.random.seed(10)\n",
    "\n",
    "num_class = 5\n",
    "train_sample_per_class = 20  # labelled (unmasked) samples kept per class\n",
    "lambda_value = 1             # weight of the Class loss term\n",
    "mask_dim = 256               # width of the Dense features the mask gates\n",
    "\n",
    "\n",
    "trainData = np.load(\"trainData.npy\")\n",
    "trainlabel = np.load(\"trainLabel.npy\")\n",
    "trainData = trainData[:, :timestep*2]\n",
    "trainlabel = trainlabel[:, :timestep*2]\n",
    "trainlabel = trainlabel.astype(int)\n",
    "\n",
    "# Per-sample mask over the 256-d feature vector feeding the Class head:\n",
    "# 1 for the first `train_sample_per_class` samples of each class, else 0.\n",
    "trainmask = np.zeros((trainlabel.shape[0], mask_dim))\n",
    "\n",
    "class_counter = np.zeros((num_class))\n",
    "train_size = trainlabel.shape[0]\n",
    "for i in range(train_size):\n",
    "    class_id = trainlabel[i,2] - 1  # class ids are 1-based in the label file\n",
    "    if class_counter[class_id] < train_sample_per_class:\n",
    "        trainmask[i, :] = 1\n",
    "        class_counter[class_id] += 1\n",
    "print(\"unmasked samples: \", np.sum(trainmask==1)/mask_dim)\n",
    "\n",
    "\n",
    "valData = np.load(\"valData.npy\")\n",
    "valLabel = np.load(\"valLabel.npy\")\n",
    "valData = valData[:, :timestep*2]\n",
    "valLabel = valLabel[:, :timestep*2]\n",
    "\n",
    "valLabel = valLabel.astype(int)\n",
    "# Validation/test use an all-ones mask: every sample feeds the Class head.\n",
    "valmask = np.ones((valLabel.shape[0], mask_dim))\n",
    "\n",
    "\n",
    "testData = np.load(\"testData.npy\")\n",
    "testLabel = np.load(\"testLabel.npy\")\n",
    "testData = testData[:, :timestep*2]\n",
    "testLabel = testLabel[:, :timestep*2]\n",
    "\n",
    "testLabel = testLabel.astype(int)\n",
    "testmask = np.ones((testLabel.shape[0], mask_dim))\n",
    "\n",
    "def categorize_labels(labels):\n",
    "    \"\"\"Bin raw Bandwidth (col 0) and Duration (col 1) labels in place.\n",
    "\n",
    "    Bandwidth -> 1..5 with edges [10000, 50000, 100000, 1000000];\n",
    "    Duration  -> 1..4 with edges [10, 30, 60].\n",
    "    Equivalent to the previous per-row if/elif ladders.\n",
    "    \"\"\"\n",
    "    # np.digitize returns the 0-based bin index, hence the +1.\n",
    "    labels[:, 0] = np.digitize(labels[:, 0], [10000, 50000, 100000, 1000000]) + 1\n",
    "    labels[:, 1] = np.digitize(labels[:, 1], [10, 30, 60]) + 1\n",
    "\n",
    "# Same binning for all three splits (was three copy-pasted loops).\n",
    "categorize_labels(trainlabel)\n",
    "categorize_labels(valLabel)\n",
    "categorize_labels(testLabel)\n",
    "\n",
    "\n",
    "def one_hot(values, num_classes):\n",
    "    \"\"\"One-hot encode 1-based integer labels into an (n, num_classes) array.\"\"\"\n",
    "    n = values.shape[0]\n",
    "    out = np.zeros((n, num_classes))\n",
    "    out[np.arange(n), values - 1] = 1\n",
    "    return out\n",
    "\n",
    "# Targets: Bandwidth (5 classes), Duration (4), Class (5) per split\n",
    "# (was nine copy-pasted zeros/scatter blocks).\n",
    "train_size = trainlabel.shape[0]\n",
    "Y_train1 = one_hot(trainlabel[:, 0], 5)\n",
    "Y_train2 = one_hot(trainlabel[:, 1], 4)\n",
    "Y_train3 = one_hot(trainlabel[:, 2], 5)\n",
    "\n",
    "val_size = valLabel.shape[0]\n",
    "Y_val1 = one_hot(valLabel[:, 0], 5)\n",
    "Y_val2 = one_hot(valLabel[:, 1], 4)\n",
    "Y_val3 = one_hot(valLabel[:, 2], 5)\n",
    "\n",
    "test_size = testLabel.shape[0]\n",
    "Y_test1 = one_hot(testLabel[:, 0], 5)\n",
    "Y_test2 = one_hot(testLabel[:, 1], 4)\n",
    "Y_test3 = one_hot(testLabel[:, 2], 5)\n",
    "\n",
    "# (N, timestep*2) flat sequences -> (N, timestep, 2) for LSTM input.\n",
    "trainData = trainData.reshape((trainData.shape[0], timestep, 2))\n",
    "testData = testData.reshape((testData.shape[0], timestep, 2))\n",
    "valData = valData.reshape((valData.shape[0], timestep, 2))\n",
    "\n",
    "\n",
    "# 保留原始数据处理部分的代码\n",
    "\n",
    "def lstm_model():\n",
    "    \"\"\"Build the 3-head LSTM multi-task model.\n",
    "\n",
    "    Inputs:  a (timestep, 2) sequence and a 256-dim mask vector.\n",
    "    Outputs: Bandwidth (5-way), Duration (4-way) and Class (5-way) softmax heads.\n",
    "    The mask multiplies the shared 256-dim features before the Class head only,\n",
    "    so rows with an all-zero mask contribute no feature signal to that head.\n",
    "    \"\"\"\n",
    "\n",
    "    model_input = Input(shape=(timestep, 2))\n",
    "    mask_input = Input(shape=(256,))\n",
    "\n",
    "    # Stacked LSTM encoder; only the last layer drops the time dimension.\n",
    "    x = LSTM(128, return_sequences=True)(model_input)\n",
    "    x = LSTM(128, return_sequences=True)(x)\n",
    "    x = LSTM(128)(x)\n",
    "\n",
    "    x = Dense(256)(x)\n",
    "    x = Activation('relu')(x)\n",
    "    x = Dense(256)(x)\n",
    "    x = Activation('relu')(x)\n",
    "\n",
    "    output1 = Dense(5, activation='softmax', name='Bandwidth')(x)\n",
    "    output2 = Dense(4, activation='softmax', name='Duration')(x)\n",
    "\n",
    "    # Element-wise mask applied to the shared features feeds the Class head.\n",
    "    x3 = multiply([x, mask_input])\n",
    "    output3 = Dense(5, activation='softmax', name='Class')(x3)\n",
    "\n",
    "    model = Model(inputs=[model_input, mask_input], outputs=[output1, output2, output3])\n",
    "    # clipnorm=1. caps the gradient norm; the Class loss is scaled by lambda_value.\n",
    "    opt = Adam(clipnorm=1.)\n",
    "    model.compile(loss=['categorical_crossentropy', 'categorical_crossentropy', 'categorical_crossentropy'],\n",
    "                  loss_weights=[1, 1, lambda_value], optimizer=opt, metrics=['accuracy'])\n",
    "\n",
    "    return model\n",
    "\n",
    "model = lstm_model()\n",
    "\n",
    "# Train with the per-sample mask as the second input; val/test use all-ones masks.\n",
    "model.fit([trainData, trainmask], [Y_train1, Y_train2, Y_train3],\n",
    "          validation_data=([valData, valmask], [Y_val1, Y_val2, Y_val3]),\n",
    "          batch_size=64, epochs=20, verbose=True, shuffle=True)\n",
    "\n",
    "# Prints [total_loss, per-head losses..., per-head accuracies...] on the test set.\n",
    "result = model.evaluate([testData, testmask], [Y_test1, Y_test2, Y_test3])\n",
    "print(result)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "unmasked samples:  100.0\n",
      "Epoch 1/20\n",
      "96/96 [==============================] - 2s 13ms/step - loss: 3.6181 - Bandwidth_loss: 0.9963 - Duration_loss: 1.0227 - Class_loss: 1.5991 - Bandwidth_accuracy: 0.5879 - Duration_accuracy: 0.5535 - Class_accuracy: 0.3020 - val_loss: 3.0209 - val_Bandwidth_loss: 0.8232 - val_Duration_loss: 0.9102 - val_Class_loss: 1.2874 - val_Bandwidth_accuracy: 0.6600 - val_Duration_accuracy: 0.5800 - val_Class_accuracy: 0.4000\n",
      "Epoch 2/20\n",
      "96/96 [==============================] - 1s 10ms/step - loss: 2.8617 - Bandwidth_loss: 0.5943 - Duration_loss: 0.6969 - Class_loss: 1.5705 - Bandwidth_accuracy: 0.7521 - Duration_accuracy: 0.7122 - Class_accuracy: 0.3085 - val_loss: 2.2150 - val_Bandwidth_loss: 0.6357 - val_Duration_loss: 0.7121 - val_Class_loss: 0.8672 - val_Bandwidth_accuracy: 0.7467 - val_Duration_accuracy: 0.7333 - val_Class_accuracy: 0.7000\n",
      "Epoch 3/20\n",
      "96/96 [==============================] - 1s 11ms/step - loss: 2.6187 - Bandwidth_loss: 0.4855 - Duration_loss: 0.5823 - Class_loss: 1.5509 - Bandwidth_accuracy: 0.8039 - Duration_accuracy: 0.7560 - Class_accuracy: 0.3113 - val_loss: 2.1412 - val_Bandwidth_loss: 0.6558 - val_Duration_loss: 0.7546 - val_Class_loss: 0.7308 - val_Bandwidth_accuracy: 0.7467 - val_Duration_accuracy: 0.7400 - val_Class_accuracy: 0.7667\n",
      "Epoch 4/20\n",
      "96/96 [==============================] - 1s 11ms/step - loss: 2.5138 - Bandwidth_loss: 0.4467 - Duration_loss: 0.5284 - Class_loss: 1.5387 - Bandwidth_accuracy: 0.8218 - Duration_accuracy: 0.7685 - Class_accuracy: 0.3123 - val_loss: 1.7258 - val_Bandwidth_loss: 0.5377 - val_Duration_loss: 0.6023 - val_Class_loss: 0.5858 - val_Bandwidth_accuracy: 0.8067 - val_Duration_accuracy: 0.7800 - val_Class_accuracy: 0.7933\n",
      "Epoch 5/20\n",
      "96/96 [==============================] - 1s 11ms/step - loss: 2.4247 - Bandwidth_loss: 0.3996 - Duration_loss: 0.4949 - Class_loss: 1.5302 - Bandwidth_accuracy: 0.8337 - Duration_accuracy: 0.7868 - Class_accuracy: 0.3136 - val_loss: 1.6367 - val_Bandwidth_loss: 0.5218 - val_Duration_loss: 0.5983 - val_Class_loss: 0.5166 - val_Bandwidth_accuracy: 0.7933 - val_Duration_accuracy: 0.8000 - val_Class_accuracy: 0.8267\n",
      "Epoch 6/20\n",
      "96/96 [==============================] - 1s 11ms/step - loss: 2.3844 - Bandwidth_loss: 0.3833 - Duration_loss: 0.4777 - Class_loss: 1.5235 - Bandwidth_accuracy: 0.8464 - Duration_accuracy: 0.7869 - Class_accuracy: 0.3139 - val_loss: 1.5402 - val_Bandwidth_loss: 0.4923 - val_Duration_loss: 0.5529 - val_Class_loss: 0.4950 - val_Bandwidth_accuracy: 0.8200 - val_Duration_accuracy: 0.7867 - val_Class_accuracy: 0.8533\n",
      "Epoch 7/20\n",
      "96/96 [==============================] - 1s 12ms/step - loss: 2.3022 - Bandwidth_loss: 0.3364 - Duration_loss: 0.4465 - Class_loss: 1.5194 - Bandwidth_accuracy: 0.8638 - Duration_accuracy: 0.7948 - Class_accuracy: 0.3142 - val_loss: 1.5610 - val_Bandwidth_loss: 0.4687 - val_Duration_loss: 0.5751 - val_Class_loss: 0.5172 - val_Bandwidth_accuracy: 0.7933 - val_Duration_accuracy: 0.7600 - val_Class_accuracy: 0.8400\n",
      "Epoch 8/20\n",
      "96/96 [==============================] - 1s 12ms/step - loss: 2.2548 - Bandwidth_loss: 0.3101 - Duration_loss: 0.4283 - Class_loss: 1.5164 - Bandwidth_accuracy: 0.8742 - Duration_accuracy: 0.8083 - Class_accuracy: 0.3145 - val_loss: 1.4208 - val_Bandwidth_loss: 0.4609 - val_Duration_loss: 0.5254 - val_Class_loss: 0.4344 - val_Bandwidth_accuracy: 0.8333 - val_Duration_accuracy: 0.8067 - val_Class_accuracy: 0.8400\n",
      "Epoch 9/20\n",
      "96/96 [==============================] - 1s 12ms/step - loss: 2.2145 - Bandwidth_loss: 0.2863 - Duration_loss: 0.4128 - Class_loss: 1.5154 - Bandwidth_accuracy: 0.8884 - Duration_accuracy: 0.8094 - Class_accuracy: 0.3141 - val_loss: 1.3833 - val_Bandwidth_loss: 0.4287 - val_Duration_loss: 0.5288 - val_Class_loss: 0.4257 - val_Bandwidth_accuracy: 0.8200 - val_Duration_accuracy: 0.7800 - val_Class_accuracy: 0.8733\n",
      "Epoch 10/20\n",
      "96/96 [==============================] - 1s 11ms/step - loss: 2.1675 - Bandwidth_loss: 0.2597 - Duration_loss: 0.3950 - Class_loss: 1.5127 - Bandwidth_accuracy: 0.8905 - Duration_accuracy: 0.8119 - Class_accuracy: 0.3147 - val_loss: 1.2462 - val_Bandwidth_loss: 0.3882 - val_Duration_loss: 0.4790 - val_Class_loss: 0.3790 - val_Bandwidth_accuracy: 0.8600 - val_Duration_accuracy: 0.8200 - val_Class_accuracy: 0.8667\n",
      "Epoch 11/20\n",
      "96/96 [==============================] - 1s 11ms/step - loss: 2.1322 - Bandwidth_loss: 0.2408 - Duration_loss: 0.3796 - Class_loss: 1.5118 - Bandwidth_accuracy: 0.9005 - Duration_accuracy: 0.8198 - Class_accuracy: 0.3147 - val_loss: 1.3234 - val_Bandwidth_loss: 0.4255 - val_Duration_loss: 0.5082 - val_Class_loss: 0.3897 - val_Bandwidth_accuracy: 0.8267 - val_Duration_accuracy: 0.8000 - val_Class_accuracy: 0.8800\n",
      "Epoch 12/20\n",
      "96/96 [==============================] - 1s 11ms/step - loss: 2.1159 - Bandwidth_loss: 0.2370 - Duration_loss: 0.3675 - Class_loss: 1.5114 - Bandwidth_accuracy: 0.9019 - Duration_accuracy: 0.8251 - Class_accuracy: 0.3147 - val_loss: 1.3395 - val_Bandwidth_loss: 0.3981 - val_Duration_loss: 0.5458 - val_Class_loss: 0.3956 - val_Bandwidth_accuracy: 0.8400 - val_Duration_accuracy: 0.8067 - val_Class_accuracy: 0.8933\n",
      "Epoch 13/20\n",
      "96/96 [==============================] - 1s 12ms/step - loss: 2.0844 - Bandwidth_loss: 0.2190 - Duration_loss: 0.3547 - Class_loss: 1.5107 - Bandwidth_accuracy: 0.9070 - Duration_accuracy: 0.8295 - Class_accuracy: 0.3145 - val_loss: 1.1145 - val_Bandwidth_loss: 0.3348 - val_Duration_loss: 0.4712 - val_Class_loss: 0.3084 - val_Bandwidth_accuracy: 0.8867 - val_Duration_accuracy: 0.8333 - val_Class_accuracy: 0.8800\n",
      "Epoch 14/20\n",
      "96/96 [==============================] - 1s 12ms/step - loss: 2.0632 - Bandwidth_loss: 0.2061 - Duration_loss: 0.3472 - Class_loss: 1.5099 - Bandwidth_accuracy: 0.9112 - Duration_accuracy: 0.8270 - Class_accuracy: 0.3147 - val_loss: 1.3601 - val_Bandwidth_loss: 0.4138 - val_Duration_loss: 0.5622 - val_Class_loss: 0.3840 - val_Bandwidth_accuracy: 0.8533 - val_Duration_accuracy: 0.8000 - val_Class_accuracy: 0.8733\n",
      "Epoch 15/20\n",
      "96/96 [==============================] - 1s 12ms/step - loss: 2.0538 - Bandwidth_loss: 0.2052 - Duration_loss: 0.3392 - Class_loss: 1.5095 - Bandwidth_accuracy: 0.9085 - Duration_accuracy: 0.8369 - Class_accuracy: 0.3149 - val_loss: 1.1319 - val_Bandwidth_loss: 0.3361 - val_Duration_loss: 0.4920 - val_Class_loss: 0.3038 - val_Bandwidth_accuracy: 0.8667 - val_Duration_accuracy: 0.8267 - val_Class_accuracy: 0.9133\n",
      "Epoch 16/20\n",
      "96/96 [==============================] - 1s 11ms/step - loss: 2.0234 - Bandwidth_loss: 0.1906 - Duration_loss: 0.3236 - Class_loss: 1.5091 - Bandwidth_accuracy: 0.9174 - Duration_accuracy: 0.8433 - Class_accuracy: 0.3149 - val_loss: 1.2269 - val_Bandwidth_loss: 0.3627 - val_Duration_loss: 0.5154 - val_Class_loss: 0.3488 - val_Bandwidth_accuracy: 0.8733 - val_Duration_accuracy: 0.7800 - val_Class_accuracy: 0.9000\n",
      "Epoch 17/20\n",
      "96/96 [==============================] - 1s 11ms/step - loss: 2.0160 - Bandwidth_loss: 0.1878 - Duration_loss: 0.3189 - Class_loss: 1.5092 - Bandwidth_accuracy: 0.9189 - Duration_accuracy: 0.8449 - Class_accuracy: 0.3150 - val_loss: 1.3357 - val_Bandwidth_loss: 0.4035 - val_Duration_loss: 0.5462 - val_Class_loss: 0.3860 - val_Bandwidth_accuracy: 0.8533 - val_Duration_accuracy: 0.8267 - val_Class_accuracy: 0.8733\n",
      "Epoch 18/20\n",
      "96/96 [==============================] - 1s 12ms/step - loss: 2.0020 - Bandwidth_loss: 0.1824 - Duration_loss: 0.3106 - Class_loss: 1.5091 - Bandwidth_accuracy: 0.9181 - Duration_accuracy: 0.8470 - Class_accuracy: 0.3149 - val_loss: 1.3524 - val_Bandwidth_loss: 0.4060 - val_Duration_loss: 0.5998 - val_Class_loss: 0.3466 - val_Bandwidth_accuracy: 0.8667 - val_Duration_accuracy: 0.8000 - val_Class_accuracy: 0.9067\n",
      "Epoch 19/20\n",
      "96/96 [==============================] - 1s 11ms/step - loss: 1.9852 - Bandwidth_loss: 0.1768 - Duration_loss: 0.2998 - Class_loss: 1.5085 - Bandwidth_accuracy: 0.9203 - Duration_accuracy: 0.8527 - Class_accuracy: 0.3150 - val_loss: 1.1478 - val_Bandwidth_loss: 0.3316 - val_Duration_loss: 0.4922 - val_Class_loss: 0.3240 - val_Bandwidth_accuracy: 0.8600 - val_Duration_accuracy: 0.8267 - val_Class_accuracy: 0.9133\n",
      "Epoch 20/20\n",
      "96/96 [==============================] - 1s 11ms/step - loss: 1.9702 - Bandwidth_loss: 0.1706 - Duration_loss: 0.2911 - Class_loss: 1.5084 - Bandwidth_accuracy: 0.9216 - Duration_accuracy: 0.8552 - Class_accuracy: 0.3150 - val_loss: 1.3120 - val_Bandwidth_loss: 0.3940 - val_Duration_loss: 0.5695 - val_Class_loss: 0.3485 - val_Bandwidth_accuracy: 0.8533 - val_Duration_accuracy: 0.8333 - val_Class_accuracy: 0.9133\n",
      "5/5 [==============================] - 0s 3ms/step - loss: 1.3978 - Bandwidth_loss: 0.3604 - Duration_loss: 0.6804 - Class_loss: 0.3570 - Bandwidth_accuracy: 0.9333 - Duration_accuracy: 0.7533 - Class_accuracy: 0.9067\n",
      "[1.397759199142456, 0.3603903651237488, 0.6803898215293884, 0.35697901248931885, 0.9333333373069763, 0.753333330154419, 0.9066666960716248]\n"
     ]
    }
   ],
   "source": [
    "\"\"\"There are many ways to optimize a deep-learning model: adjusting the network architecture, changing the\n",
    "loss function, tuning optimizer parameters, using pre-trained models, changing how the training data is\n",
    "processed, and so on. Some possible directions:\n",
    "\n",
    "Use a more complex architecture: it may capture more data features and improve performance, e.g. more\n",
    "convolutional or fully connected layers, or different layer types such as RNN or self-attention layers.\n",
    "\n",
    "Change the loss function or optimizer, or optimizer parameters such as learning rate and momentum.\n",
    "\n",
    "Use a pre-trained model via transfer learning: reuse some of its layers and train only the remaining ones.\n",
    "\n",
    "Change data processing: normalization, standardization, or data augmentation to diversify the training data.\n",
    "\n",
    "Use Early Stopping and Model Checkpoints: early stopping prevents overfitting, while checkpoints save the\n",
    "best-performing model during training.\"\"\"\n",
    "import numpy as np\n",
    "from keras.callbacks import EarlyStopping, ModelCheckpoint\n",
    "from keras.models import Model\n",
    "from keras.layers import Dense\n",
    "from keras.layers import multiply\n",
    "from keras.layers import Flatten\n",
    "from keras.layers import Input\n",
    "from keras.layers.convolutional import Conv1D, MaxPooling1D\n",
    "from keras.layers import Activation\n",
    "from keras.optimizers import Adam\n",
    "\n",
    "# --- Configuration ---\n",
    "timestep = 60\n",
    "np.random.seed(10)\n",
    "\n",
    "num_class = 5\n",
    "train_sample_per_class = 20\n",
    "# lambda_value weights the Class loss relative to the Bandwidth/Duration losses.\n",
    "lambda_value = 1\n",
    "\n",
    "\n",
    "# --- Training data: keep only the first timestep*2 values per sample\n",
    "# (reshaped later into (timestep, 2)). ---\n",
    "trainData = np.load(\"trainData.npy\")\n",
    "trainlabel = np.load(\"trainLabel.npy\")\n",
    "# trainData = trainData[:, -timestep*2:]\n",
    "# trainlabel = trainlabel[:, -timestep*2:]\n",
    "trainData = trainData[:, :timestep*2]\n",
    "trainlabel = trainlabel[:, :timestep*2]\n",
    "trainlabel = trainlabel.astype(int)\n",
    "\n",
    "# Per-sample mask for the Class head: a row is all-ones if selected, else all-zeros.\n",
    "trainmask = np.zeros((trainlabel.shape[0],256))\n",
    "\n",
    "# Unmask at most train_sample_per_class samples per class (first come, first served).\n",
    "class_counter = np.zeros((num_class))\n",
    "train_size = trainlabel.shape[0]\n",
    "j = 0\n",
    "for i in range(train_size):\n",
    "    class_id = trainlabel[i,2] - 1\n",
    "    if class_counter[class_id] < train_sample_per_class:\n",
    "        trainmask[i, :] = 1\n",
    "        j += 1\n",
    "        class_counter[class_id] += 1\n",
    "# Each unmasked row contributes 256 ones, so divide by 256 to count samples.\n",
    "print(\"unmasked samples: \", str(np.sum(trainmask==1)/256))\n",
    "\n",
    "\n",
    "# --- Validation data: same column truncation as training data. ---\n",
    "valData = np.load(\"valData.npy\")\n",
    "valLabel = np.load(\"valLabel.npy\")\n",
    "valData = valData[:, :timestep*2]\n",
    "valLabel = valLabel[:, :timestep*2]\n",
    "\n",
    "valLabel = valLabel.astype(int)\n",
    "# Validation samples are never masked: the Class head sees all of them.\n",
    "valmask = np.ones((valLabel.shape[0], 256))\n",
    "\n",
    "\n",
    "# --- Test data. ---\n",
    "testData = np.load(\"testData.npy\")\n",
    "testLabel = np.load(\"testLabel.npy\")\n",
    "testData = testData[:, :timestep*2]\n",
    "testLabel = testLabel[:, :timestep*2]\n",
    "\n",
    "testLabel = testLabel.astype(int)\n",
    "# Test samples are never masked either.\n",
    "testmask = np.ones((testLabel.shape[0], 256))\n",
    "\n",
    "# Vectorized label binning; replaces three duplicated per-row if/elif chains.\n",
    "def _categorize_labels(labels):\n",
    "    \"\"\"Bin col 0 (bandwidth) into 1..5 and col 1 (duration) into 1..4, in place.\n",
    "\n",
    "    Bin edges are right-open (strict <), matching the original comparisons:\n",
    "    bandwidth thresholds 10k/50k/100k/1M, duration thresholds 10/30/60.\n",
    "    \"\"\"\n",
    "    # np.digitize returns 0-based bin indices, so +1 yields the 1-based class ids.\n",
    "    labels[:, 0] = np.digitize(labels[:, 0], [10000, 50000, 100000, 1000000]) + 1\n",
    "    labels[:, 1] = np.digitize(labels[:, 1], [10, 30, 60]) + 1\n",
    "\n",
    "_categorize_labels(trainlabel)\n",
    "_categorize_labels(valLabel)\n",
    "_categorize_labels(testLabel)\n",
    "\n",
    "\n",
    "# One-hot encode the three targets: col 0 -> bandwidth (5 classes),\n",
    "# col 1 -> duration (4 classes), col 2 -> traffic class (5 classes).\n",
    "# Labels are 1-based, hence the -1 when indexing the one-hot column.\n",
    "train_size = trainlabel.shape[0]\n",
    "Y_train1 = np.zeros((train_size,5))\n",
    "Y_train1[np.arange(train_size),trainlabel[:,0]-1] = 1\n",
    "Y_train2 = np.zeros((train_size,4))\n",
    "Y_train2[np.arange(train_size),trainlabel[:,1]-1] = 1\n",
    "Y_train3 = np.zeros((train_size,5))\n",
    "Y_train3[np.arange(train_size),trainlabel[:,2]-1] = 1\n",
    "\n",
    "val_size = valLabel.shape[0]\n",
    "Y_val1 = np.zeros((val_size,5))\n",
    "Y_val1[np.arange(val_size),valLabel[:,0]-1] = 1\n",
    "Y_val2 = np.zeros((val_size,4))\n",
    "Y_val2[np.arange(val_size),valLabel[:,1]-1] = 1\n",
    "Y_val3 = np.zeros((val_size,5))\n",
    "Y_val3[np.arange(val_size),valLabel[:,2]-1] = 1\n",
    "\n",
    "test_size = testLabel.shape[0]\n",
    "Y_test1 = np.zeros((test_size,5))\n",
    "Y_test1[np.arange(test_size),testLabel[:,0]-1] = 1\n",
    "Y_test2 = np.zeros((test_size,4))\n",
    "Y_test2[np.arange(test_size),testLabel[:,1]-1] = 1\n",
    "Y_test3 = np.zeros((test_size,5))\n",
    "Y_test3[np.arange(test_size),testLabel[:,2]-1] = 1\n",
    "\n",
    "# trainData = np.expand_dims(trainData, axis=-1)\n",
    "# testData = np.expand_dims(testData, axis=-1)\n",
    "# Reshape flat (N, timestep*2) arrays into (N, timestep, 2) sequences for the model.\n",
    "trainData = trainData.reshape((trainData.shape[0], timestep, 2))\n",
    "testData = testData.reshape((testData.shape[0], timestep, 2))\n",
    "valData = valData.reshape((valData.shape[0], timestep, 2))\n",
    "\n",
    "\n",
    "def improved_model():\n",
    "    \"\"\"Build a 1-D CNN variant of the multi-task model.\n",
    "\n",
    "    Same interface as the LSTM version: a (timestep, 2) sequence input, a 256-dim\n",
    "    mask input, and Bandwidth (5-way) / Duration (4-way) / Class (5-way) softmax\n",
    "    heads. The mask multiplies the shared features before the Class head only.\n",
    "    \"\"\"\n",
    "\n",
    "    model_input = Input(shape=(timestep,2))\n",
    "    mask_input = Input(shape=(256,))\n",
    "\n",
    "    # Three conv blocks with doubling filter counts (32 -> 64 -> 128),\n",
    "    # each followed by 2x max pooling.\n",
    "    x = Conv1D(32, 3, activation='relu')(model_input)\n",
    "    x = Conv1D(32, 3, activation='relu')(x)\n",
    "    x = MaxPooling1D(pool_size=(2))(x)\n",
    "\n",
    "    x = Conv1D(64, 3, activation='relu')(x)\n",
    "    x = Conv1D(64, 3, activation='relu')(x)\n",
    "    x = MaxPooling1D(pool_size=(2))(x)\n",
    "\n",
    "    x = Conv1D(128, 3, activation='relu')(x)\n",
    "    x = Conv1D(128, 3, activation='relu')(x)\n",
    "    x = MaxPooling1D(pool_size=(2))(x)\n",
    "\n",
    "    x = Flatten()(x)\n",
    "\n",
    "    x = Dense(256)(x)\n",
    "    x = Activation('relu')(x)\n",
    "    x = Dense(256)(x)\n",
    "    x = Activation('relu')(x)\n",
    "\n",
    "    output1 = Dense(5, activation='softmax', name='Bandwidth')(x)\n",
    "\n",
    "    output2 = Dense(4, activation='softmax', name='Duration')(x)\n",
    "\n",
    "    # Element-wise mask applied to the shared features feeds the Class head.\n",
    "    x3 = multiply([x,mask_input])\n",
    "    output3 = Dense(5, activation='softmax', name='Class')(x3)\n",
    "\n",
    "    model = Model(inputs=[model_input,mask_input], outputs=[output1, output2, output3])\n",
    "    # clipnorm=1. caps the gradient norm; the Class loss is scaled by lambda_value.\n",
    "    opt = Adam(clipnorm = 1.)\n",
    "    model.compile(loss=['categorical_crossentropy', 'categorical_crossentropy', 'categorical_crossentropy'], loss_weights=[1,1,lambda_value], optimizer=opt, metrics=['accuracy'])\n",
    "\n",
    "    return model\n",
    "\n",
    "model = improved_model()\n",
    "\n",
    "# Train with the per-sample mask as the second input; val/test use all-ones masks.\n",
    "model.fit([trainData,trainmask], [Y_train1, Y_train2, Y_train3],\n",
    "          validation_data = ([valData, valmask], [Y_val1, Y_val2, Y_val3]),\n",
    "          batch_size = 64, epochs = 20, verbose = True, shuffle = True)\n",
    "\n",
    "# Prints [total_loss, per-head losses..., per-head accuracies...] on the test set.\n",
    "result = model.evaluate([testData, testmask], [Y_test1, Y_test2, Y_test3])\n",
    "print(result)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "unmasked samples:  100.0\n",
      "Epoch 1/20\n",
      "96/96 [==============================] - 2s 8ms/step - loss: 4.0661 - Bandwidth_loss: 1.2538 - Duration_loss: 1.2135 - Class_loss: 1.5988 - Bandwidth_accuracy: 0.4993 - Duration_accuracy: 0.4330 - Class_accuracy: 0.2916 - val_loss: 4.1277 - val_Bandwidth_loss: 1.3241 - val_Duration_loss: 1.2781 - val_Class_loss: 1.5255 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.4267 - val_Class_accuracy: 0.2667\n",
      "Epoch 2/20\n",
      "96/96 [==============================] - 1s 6ms/step - loss: 3.9105 - Bandwidth_loss: 1.1824 - Duration_loss: 1.1506 - Class_loss: 1.5775 - Bandwidth_accuracy: 0.5157 - Duration_accuracy: 0.4667 - Class_accuracy: 0.3036 - val_loss: 4.0738 - val_Bandwidth_loss: 1.3066 - val_Duration_loss: 1.3248 - val_Class_loss: 1.4424 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3533 - val_Class_accuracy: 0.2800\n",
      "Epoch 3/20\n",
      "96/96 [==============================] - 1s 6ms/step - loss: 3.8569 - Bandwidth_loss: 1.1649 - Duration_loss: 1.1295 - Class_loss: 1.5624 - Bandwidth_accuracy: 0.5208 - Duration_accuracy: 0.4812 - Class_accuracy: 0.3044 - val_loss: 3.9203 - val_Bandwidth_loss: 1.2836 - val_Duration_loss: 1.2540 - val_Class_loss: 1.3828 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.4333 - val_Class_accuracy: 0.3667\n",
      "Epoch 4/20\n",
      "96/96 [==============================] - 1s 6ms/step - loss: 3.8269 - Bandwidth_loss: 1.1528 - Duration_loss: 1.1217 - Class_loss: 1.5525 - Bandwidth_accuracy: 0.5301 - Duration_accuracy: 0.4872 - Class_accuracy: 0.3048 - val_loss: 3.8694 - val_Bandwidth_loss: 1.2799 - val_Duration_loss: 1.2314 - val_Class_loss: 1.3581 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.4333 - val_Class_accuracy: 0.3600\n",
      "Epoch 5/20\n",
      "96/96 [==============================] - 1s 6ms/step - loss: 3.8107 - Bandwidth_loss: 1.1451 - Duration_loss: 1.1206 - Class_loss: 1.5450 - Bandwidth_accuracy: 0.5284 - Duration_accuracy: 0.4975 - Class_accuracy: 0.3051 - val_loss: 3.9149 - val_Bandwidth_loss: 1.3154 - val_Duration_loss: 1.2447 - val_Class_loss: 1.3547 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.4333 - val_Class_accuracy: 0.3667\n",
      "Epoch 6/20\n",
      "96/96 [==============================] - 1s 6ms/step - loss: 3.7817 - Bandwidth_loss: 1.1350 - Duration_loss: 1.1070 - Class_loss: 1.5397 - Bandwidth_accuracy: 0.5314 - Duration_accuracy: 0.4993 - Class_accuracy: 0.3049 - val_loss: 3.9919 - val_Bandwidth_loss: 1.3356 - val_Duration_loss: 1.2969 - val_Class_loss: 1.3593 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.3400 - val_Class_accuracy: 0.3667\n",
      "Epoch 7/20\n",
      "96/96 [==============================] - 1s 6ms/step - loss: 3.7806 - Bandwidth_loss: 1.1372 - Duration_loss: 1.1070 - Class_loss: 1.5364 - Bandwidth_accuracy: 0.5310 - Duration_accuracy: 0.5001 - Class_accuracy: 0.3053 - val_loss: 3.9142 - val_Bandwidth_loss: 1.3056 - val_Duration_loss: 1.2541 - val_Class_loss: 1.3545 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.4400 - val_Class_accuracy: 0.3667\n",
      "Epoch 8/20\n",
      "96/96 [==============================] - 1s 7ms/step - loss: 3.7663 - Bandwidth_loss: 1.1320 - Duration_loss: 1.1005 - Class_loss: 1.5338 - Bandwidth_accuracy: 0.5343 - Duration_accuracy: 0.5015 - Class_accuracy: 0.3069 - val_loss: 3.9171 - val_Bandwidth_loss: 1.3179 - val_Duration_loss: 1.2506 - val_Class_loss: 1.3487 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.4400 - val_Class_accuracy: 0.3600\n",
      "Epoch 9/20\n",
      "96/96 [==============================] - 1s 6ms/step - loss: 3.7485 - Bandwidth_loss: 1.1229 - Duration_loss: 1.0933 - Class_loss: 1.5323 - Bandwidth_accuracy: 0.5358 - Duration_accuracy: 0.5056 - Class_accuracy: 0.3056 - val_loss: 3.9500 - val_Bandwidth_loss: 1.3365 - val_Duration_loss: 1.2593 - val_Class_loss: 1.3542 - val_Bandwidth_accuracy: 0.4467 - val_Duration_accuracy: 0.4467 - val_Class_accuracy: 0.3600\n",
      "5/5 [==============================] - 0s 2ms/step - loss: 3.6936 - Bandwidth_loss: 1.2299 - Duration_loss: 1.2337 - Class_loss: 1.2300 - Bandwidth_accuracy: 0.4667 - Duration_accuracy: 0.4400 - Class_accuracy: 0.4267\n",
      "[3.6935677528381348, 1.2298824787139893, 1.2336955070495605, 1.2299894094467163, 0.46666666865348816, 0.4399999976158142, 0.4266666769981384]\n"
     ]
    }
   ],
   "source": [
    "\"\"\"Early stopping halts training when the validation loss stops improving, which helps prevent overfitting.\n",
    "In Keras this is implemented with the EarlyStopping callback; its patience parameter sets how many epochs to\n",
    "wait after the validation loss stops improving before stopping. In this example, if the validation loss does\n",
    "not improve for 5 consecutive epochs, training stops, so we avoid overfitting the training set and improve\n",
    "the model's performance on unseen data.\"\"\"\n",
    "import numpy as np\n",
    "from tensorflow.keras.models import Model\n",
    "from tensorflow.keras.layers import Dense, Dropout, Activation, multiply, Input, Flatten\n",
    "from tensorflow.keras.layers import LayerNormalization, MultiHeadAttention\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "\n",
    "# --- Configuration ---\n",
    "timestep = 60\n",
    "np.random.seed(10)\n",
    "\n",
    "num_class = 5\n",
    "train_sample_per_class = 20\n",
    "# lambda_value weights the Class loss relative to the Bandwidth/Duration losses.\n",
    "lambda_value = 1\n",
    "\n",
    "\n",
    "# --- Training data: keep only the first timestep*2 values per sample\n",
    "# (reshaped later into (timestep, 2)). ---\n",
    "trainData = np.load(\"trainData.npy\")\n",
    "trainlabel = np.load(\"trainLabel.npy\")\n",
    "# trainData = trainData[:, -timestep*2:]\n",
    "# trainlabel = trainlabel[:, -timestep*2:]\n",
    "trainData = trainData[:, :timestep*2]\n",
    "trainlabel = trainlabel[:, :timestep*2]\n",
    "trainlabel = trainlabel.astype(int)\n",
    "\n",
    "# Per-sample mask for the Class head: a row is all-ones if selected, else all-zeros.\n",
    "trainmask = np.zeros((trainlabel.shape[0],256))\n",
    "\n",
    "# Unmask at most train_sample_per_class samples per class (first come, first served).\n",
    "class_counter = np.zeros((num_class))\n",
    "train_size = trainlabel.shape[0]\n",
    "j = 0\n",
    "for i in range(train_size):\n",
    "    class_id = trainlabel[i,2] - 1\n",
    "    if class_counter[class_id] < train_sample_per_class:\n",
    "        trainmask[i, :] = 1\n",
    "        j += 1\n",
    "        class_counter[class_id] += 1\n",
    "# Each unmasked row contributes 256 ones, so divide by 256 to count samples.\n",
    "print(\"unmasked samples: \", str(np.sum(trainmask==1)/256))\n",
    "\n",
    "\n",
    "# --- Validation data: same column truncation as training data. ---\n",
    "valData = np.load(\"valData.npy\")\n",
    "valLabel = np.load(\"valLabel.npy\")\n",
    "valData = valData[:, :timestep*2]\n",
    "valLabel = valLabel[:, :timestep*2]\n",
    "\n",
    "valLabel = valLabel.astype(int)\n",
    "# Validation samples are never masked: the Class head sees all of them.\n",
    "valmask = np.ones((valLabel.shape[0], 256))\n",
    "\n",
    "\n",
    "# --- Test data. ---\n",
    "testData = np.load(\"testData.npy\")\n",
    "testLabel = np.load(\"testLabel.npy\")\n",
    "testData = testData[:, :timestep*2]\n",
    "testLabel = testLabel[:, :timestep*2]\n",
    "\n",
    "testLabel = testLabel.astype(int)\n",
    "# Test samples are never masked either.\n",
    "testmask = np.ones((testLabel.shape[0], 256))\n",
    "\n",
    "# Vectorized label binning; replaces three duplicated per-row if/elif chains.\n",
    "def _categorize_labels(labels):\n",
    "    \"\"\"Bin col 0 (bandwidth) into 1..5 and col 1 (duration) into 1..4, in place.\n",
    "\n",
    "    Bin edges are right-open (strict <), matching the original comparisons:\n",
    "    bandwidth thresholds 10k/50k/100k/1M, duration thresholds 10/30/60.\n",
    "    \"\"\"\n",
    "    # np.digitize returns 0-based bin indices, so +1 yields the 1-based class ids.\n",
    "    labels[:, 0] = np.digitize(labels[:, 0], [10000, 50000, 100000, 1000000]) + 1\n",
    "    labels[:, 1] = np.digitize(labels[:, 1], [10, 30, 60]) + 1\n",
    "\n",
    "_categorize_labels(trainlabel)\n",
    "_categorize_labels(valLabel)\n",
    "_categorize_labels(testLabel)\n",
    "\n",
    "\n",
    "# One-hot encode the three targets: col 0 -> bandwidth (5 classes),\n",
    "# col 1 -> duration (4 classes), col 2 -> traffic class (5 classes).\n",
    "# Labels are 1-based, hence the -1 when indexing the one-hot column.\n",
    "train_size = trainlabel.shape[0]\n",
    "Y_train1 = np.zeros((train_size,5))\n",
    "Y_train1[np.arange(train_size),trainlabel[:,0]-1] = 1\n",
    "Y_train2 = np.zeros((train_size,4))\n",
    "Y_train2[np.arange(train_size),trainlabel[:,1]-1] = 1\n",
    "Y_train3 = np.zeros((train_size,5))\n",
    "Y_train3[np.arange(train_size),trainlabel[:,2]-1] = 1\n",
    "\n",
    "val_size = valLabel.shape[0]\n",
    "Y_val1 = np.zeros((val_size,5))\n",
    "Y_val1[np.arange(val_size),valLabel[:,0]-1] = 1\n",
    "Y_val2 = np.zeros((val_size,4))\n",
    "Y_val2[np.arange(val_size),valLabel[:,1]-1] = 1\n",
    "Y_val3 = np.zeros((val_size,5))\n",
    "Y_val3[np.arange(val_size),valLabel[:,2]-1] = 1\n",
    "\n",
    "test_size = testLabel.shape[0]\n",
    "Y_test1 = np.zeros((test_size,5))\n",
    "Y_test1[np.arange(test_size),testLabel[:,0]-1] = 1\n",
    "Y_test2 = np.zeros((test_size,4))\n",
    "Y_test2[np.arange(test_size),testLabel[:,1]-1] = 1\n",
    "Y_test3 = np.zeros((test_size,5))\n",
    "Y_test3[np.arange(test_size),testLabel[:,2]-1] = 1\n",
    "\n",
    "# trainData = np.expand_dims(trainData, axis=-1)\n",
    "# testData = np.expand_dims(testData, axis=-1)\n",
    "# Reshape flat (N, timestep*2) arrays into (N, timestep, 2) sequences for the model.\n",
    "trainData = trainData.reshape((trainData.shape[0], timestep, 2))\n",
    "testData = testData.reshape((testData.shape[0], timestep, 2))\n",
    "valData = valData.reshape((valData.shape[0], timestep, 2))\n",
    "\n",
    "\n",
    "def base_model():\n",
    "    \"\"\"Build a single-block self-attention (Transformer-style) multi-task model.\n",
    "\n",
    "    Same interface as the earlier models: a (timestep, 2) sequence input, a\n",
    "    256-dim mask input, and Bandwidth/Duration/Class softmax heads.\n",
    "\n",
    "    NOTE(review): this function is defined a second time immediately below with\n",
    "    an identical body; that later definition silently shadows this one.\n",
    "    \"\"\"\n",
    "    model_input = Input(shape=(timestep,2))\n",
    "    mask_input = Input(shape=(256,))\n",
    "\n",
    "    # Transformer Layer: self-attention with a residual connection + LayerNorm.\n",
    "    x = MultiHeadAttention(num_heads=2, key_dim=2)(model_input, model_input)\n",
    "    x = Dropout(0.1)(x)\n",
    "    x = LayerNormalization(epsilon=1e-6)(x + model_input)\n",
    "\n",
    "    x = Flatten()(x)\n",
    "\n",
    "    x = Dense(256)(x)\n",
    "    x = Activation('relu')(x)\n",
    "    x = Dense(256)(x)\n",
    "    x = Activation('relu')(x)\n",
    "\n",
    "    output1 = Dense(5, activation='softmax', name='Bandwidth')(x)\n",
    "    output2 = Dense(4, activation='softmax', name='Duration')(x)\n",
    "    # Element-wise mask applied to the shared features feeds the Class head.\n",
    "    x3 = multiply([x, mask_input])\n",
    "    output3 = Dense(5, activation='softmax', name='Class')(x3)\n",
    "\n",
    "    model = Model(inputs=[model_input, mask_input], outputs=[output1, output2, output3])\n",
    "\n",
    "    # clipnorm=1. caps the gradient norm; the Class loss is scaled by lambda_value.\n",
    "    opt = Adam(clipnorm=1.)\n",
    "    model.compile(loss=['categorical_crossentropy', 'categorical_crossentropy', 'categorical_crossentropy'], \n",
    "                  loss_weights=[1, 1, lambda_value], optimizer=opt, metrics=['accuracy'])\n",
    "    return model\n",
    "\n",
    "def base_model():\n",
    "    model_input = Input(shape=(timestep,2))\n",
    "    mask_input = Input(shape=(256,))\n",
    "\n",
    "    # Transformer Layer\n",
    "    x = MultiHeadAttention(num_heads=2, key_dim=2)(model_input, model_input)\n",
    "    x = Dropout(0.1)(x)\n",
    "    x = LayerNormalization(epsilon=1e-6)(x + model_input)\n",
    "\n",
    "    x = Flatten()(x)\n",
    "\n",
    "    x = Dense(256)(x)\n",
    "    x = Activation('relu')(x)\n",
    "    x = Dense(256)(x)\n",
    "    x = Activation('relu')(x)\n",
    "\n",
    "    output1 = Dense(5, activation='softmax', name='Bandwidth')(x)\n",
    "    output2 = Dense(4, activation='softmax', name='Duration')(x)\n",
    "    x3 = multiply([x, mask_input])\n",
    "    output3 = Dense(5, activation='softmax', name='Class')(x3)\n",
    "\n",
    "    model = Model(inputs=[model_input, mask_input], outputs=[output1, output2, output3])\n",
    "\n",
    "    opt = Adam(clipnorm=1.)\n",
    "    model.compile(loss=['categorical_crossentropy', 'categorical_crossentropy', 'categorical_crossentropy'], \n",
    "                  loss_weights=[1, 1, lambda_value], optimizer=opt, metrics=['accuracy'])\n",
    "    return model\n",
    "\n",
    "model = base_model()\n",
    "\n",
    "# Stop once val_loss has not improved for 5 consecutive epochs.\n",
    "early_stopping_callback = EarlyStopping(monitor='val_loss', patience=5)\n",
    "\n",
    "# Train all three heads jointly; the mask arrays gate the Class head per sample.\n",
    "model.fit([trainData, trainmask], [Y_train1, Y_train2, Y_train3], validation_data=([valData, valmask], [Y_val1, Y_val2, Y_val3]), \n",
    "          batch_size=64, epochs=20, verbose=True, shuffle=True, callbacks=[early_stopping_callback])\n",
    "\n",
    "# Evaluate on the held-out test set (test masks are all ones).\n",
    "result = model.evaluate([testData, testmask], [Y_test1, Y_test2, Y_test3])\n",
    "print(result)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "unmasked samples:  100.0\n",
      "Epoch 1/20\n",
      "96/96 [==============================] - 1s 4ms/step - loss: 3.5046 - Bandwidth_loss: 0.9441 - Duration_loss: 0.9621 - Class_loss: 1.5984 - Bandwidth_accuracy: 0.6205 - Duration_accuracy: 0.5779 - Class_accuracy: 0.3020 - val_loss: 2.8341 - val_Bandwidth_loss: 0.8018 - val_Duration_loss: 0.9018 - val_Class_loss: 1.1305 - val_Bandwidth_accuracy: 0.6533 - val_Duration_accuracy: 0.6400 - val_Class_accuracy: 0.6533\n",
      "Epoch 2/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.8861 - Bandwidth_loss: 0.6323 - Duration_loss: 0.6866 - Class_loss: 1.5671 - Bandwidth_accuracy: 0.7459 - Duration_accuracy: 0.7187 - Class_accuracy: 0.3113 - val_loss: 2.2250 - val_Bandwidth_loss: 0.6513 - val_Duration_loss: 0.7341 - val_Class_loss: 0.8396 - val_Bandwidth_accuracy: 0.7000 - val_Duration_accuracy: 0.7333 - val_Class_accuracy: 0.6733\n",
      "Epoch 3/20\n",
      "96/96 [==============================] - 0s 1ms/step - loss: 2.7209 - Bandwidth_loss: 0.5378 - Duration_loss: 0.6330 - Class_loss: 1.5501 - Bandwidth_accuracy: 0.7864 - Duration_accuracy: 0.7343 - Class_accuracy: 0.3129 - val_loss: 1.9727 - val_Bandwidth_loss: 0.6104 - val_Duration_loss: 0.6868 - val_Class_loss: 0.6755 - val_Bandwidth_accuracy: 0.7600 - val_Duration_accuracy: 0.7333 - val_Class_accuracy: 0.7733\n",
      "Epoch 4/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.6012 - Bandwidth_loss: 0.4757 - Duration_loss: 0.5864 - Class_loss: 1.5391 - Bandwidth_accuracy: 0.8073 - Duration_accuracy: 0.7565 - Class_accuracy: 0.3132 - val_loss: 1.7773 - val_Bandwidth_loss: 0.5309 - val_Duration_loss: 0.6269 - val_Class_loss: 0.6195 - val_Bandwidth_accuracy: 0.7800 - val_Duration_accuracy: 0.7667 - val_Class_accuracy: 0.8200\n",
      "Epoch 5/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.5046 - Bandwidth_loss: 0.4280 - Duration_loss: 0.5452 - Class_loss: 1.5313 - Bandwidth_accuracy: 0.8309 - Duration_accuracy: 0.7706 - Class_accuracy: 0.3134 - val_loss: 2.0804 - val_Bandwidth_loss: 0.6405 - val_Duration_loss: 0.7912 - val_Class_loss: 0.6486 - val_Bandwidth_accuracy: 0.7600 - val_Duration_accuracy: 0.6067 - val_Class_accuracy: 0.7333\n",
      "Epoch 6/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.4804 - Bandwidth_loss: 0.4215 - Duration_loss: 0.5333 - Class_loss: 1.5256 - Bandwidth_accuracy: 0.8317 - Duration_accuracy: 0.7706 - Class_accuracy: 0.3132 - val_loss: 2.4322 - val_Bandwidth_loss: 0.6903 - val_Duration_loss: 0.8688 - val_Class_loss: 0.8731 - val_Bandwidth_accuracy: 0.7200 - val_Duration_accuracy: 0.6467 - val_Class_accuracy: 0.6800\n",
      "Epoch 7/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.4044 - Bandwidth_loss: 0.3803 - Duration_loss: 0.5024 - Class_loss: 1.5217 - Bandwidth_accuracy: 0.8513 - Duration_accuracy: 0.7790 - Class_accuracy: 0.3141 - val_loss: 1.7105 - val_Bandwidth_loss: 0.5456 - val_Duration_loss: 0.6080 - val_Class_loss: 0.5569 - val_Bandwidth_accuracy: 0.8133 - val_Duration_accuracy: 0.7733 - val_Class_accuracy: 0.8267\n",
      "Epoch 8/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.3940 - Bandwidth_loss: 0.3802 - Duration_loss: 0.4951 - Class_loss: 1.5187 - Bandwidth_accuracy: 0.8552 - Duration_accuracy: 0.7832 - Class_accuracy: 0.3137 - val_loss: 2.3257 - val_Bandwidth_loss: 0.7501 - val_Duration_loss: 0.8554 - val_Class_loss: 0.7202 - val_Bandwidth_accuracy: 0.7267 - val_Duration_accuracy: 0.7133 - val_Class_accuracy: 0.7533\n",
      "Epoch 9/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.3649 - Bandwidth_loss: 0.3632 - Duration_loss: 0.4846 - Class_loss: 1.5170 - Bandwidth_accuracy: 0.8552 - Duration_accuracy: 0.7856 - Class_accuracy: 0.3142 - val_loss: 1.4424 - val_Bandwidth_loss: 0.4232 - val_Duration_loss: 0.5482 - val_Class_loss: 0.4710 - val_Bandwidth_accuracy: 0.8400 - val_Duration_accuracy: 0.7933 - val_Class_accuracy: 0.8400\n",
      "Epoch 10/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.3071 - Bandwidth_loss: 0.3308 - Duration_loss: 0.4619 - Class_loss: 1.5143 - Bandwidth_accuracy: 0.8712 - Duration_accuracy: 0.7923 - Class_accuracy: 0.3142 - val_loss: 1.5777 - val_Bandwidth_loss: 0.4837 - val_Duration_loss: 0.5825 - val_Class_loss: 0.5115 - val_Bandwidth_accuracy: 0.8200 - val_Duration_accuracy: 0.7533 - val_Class_accuracy: 0.8000\n",
      "Epoch 11/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.2895 - Bandwidth_loss: 0.3252 - Duration_loss: 0.4508 - Class_loss: 1.5135 - Bandwidth_accuracy: 0.8716 - Duration_accuracy: 0.7961 - Class_accuracy: 0.3144 - val_loss: 1.6155 - val_Bandwidth_loss: 0.4672 - val_Duration_loss: 0.5938 - val_Class_loss: 0.5545 - val_Bandwidth_accuracy: 0.8400 - val_Duration_accuracy: 0.7667 - val_Class_accuracy: 0.8133\n",
      "Epoch 12/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.2698 - Bandwidth_loss: 0.3109 - Duration_loss: 0.4462 - Class_loss: 1.5127 - Bandwidth_accuracy: 0.8785 - Duration_accuracy: 0.7939 - Class_accuracy: 0.3142 - val_loss: 1.5700 - val_Bandwidth_loss: 0.4764 - val_Duration_loss: 0.6036 - val_Class_loss: 0.4899 - val_Bandwidth_accuracy: 0.8200 - val_Duration_accuracy: 0.7733 - val_Class_accuracy: 0.8600\n",
      "Epoch 13/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.2605 - Bandwidth_loss: 0.3109 - Duration_loss: 0.4382 - Class_loss: 1.5114 - Bandwidth_accuracy: 0.8738 - Duration_accuracy: 0.7996 - Class_accuracy: 0.3147 - val_loss: 1.3227 - val_Bandwidth_loss: 0.3865 - val_Duration_loss: 0.4844 - val_Class_loss: 0.4518 - val_Bandwidth_accuracy: 0.8200 - val_Duration_accuracy: 0.7733 - val_Class_accuracy: 0.8600\n",
      "Epoch 14/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.2421 - Bandwidth_loss: 0.3022 - Duration_loss: 0.4282 - Class_loss: 1.5117 - Bandwidth_accuracy: 0.8819 - Duration_accuracy: 0.8003 - Class_accuracy: 0.3142 - val_loss: 1.3407 - val_Bandwidth_loss: 0.4023 - val_Duration_loss: 0.4958 - val_Class_loss: 0.4426 - val_Bandwidth_accuracy: 0.8200 - val_Duration_accuracy: 0.7733 - val_Class_accuracy: 0.8200\n",
      "Epoch 15/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.2088 - Bandwidth_loss: 0.2797 - Duration_loss: 0.4185 - Class_loss: 1.5106 - Bandwidth_accuracy: 0.8837 - Duration_accuracy: 0.8050 - Class_accuracy: 0.3145 - val_loss: 1.3948 - val_Bandwidth_loss: 0.4198 - val_Duration_loss: 0.5196 - val_Class_loss: 0.4555 - val_Bandwidth_accuracy: 0.8267 - val_Duration_accuracy: 0.7733 - val_Class_accuracy: 0.8467\n",
      "Epoch 16/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.2213 - Bandwidth_loss: 0.2977 - Duration_loss: 0.4126 - Class_loss: 1.5110 - Bandwidth_accuracy: 0.8809 - Duration_accuracy: 0.8127 - Class_accuracy: 0.3142 - val_loss: 1.3427 - val_Bandwidth_loss: 0.3778 - val_Duration_loss: 0.5005 - val_Class_loss: 0.4644 - val_Bandwidth_accuracy: 0.8467 - val_Duration_accuracy: 0.7867 - val_Class_accuracy: 0.8667\n",
      "Epoch 17/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.1912 - Bandwidth_loss: 0.2737 - Duration_loss: 0.4068 - Class_loss: 1.5106 - Bandwidth_accuracy: 0.8881 - Duration_accuracy: 0.8107 - Class_accuracy: 0.3142 - val_loss: 1.3728 - val_Bandwidth_loss: 0.4164 - val_Duration_loss: 0.5131 - val_Class_loss: 0.4433 - val_Bandwidth_accuracy: 0.8267 - val_Duration_accuracy: 0.7800 - val_Class_accuracy: 0.8533\n",
      "Epoch 18/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.1772 - Bandwidth_loss: 0.2663 - Duration_loss: 0.3996 - Class_loss: 1.5113 - Bandwidth_accuracy: 0.8892 - Duration_accuracy: 0.8086 - Class_accuracy: 0.3142 - val_loss: 1.4789 - val_Bandwidth_loss: 0.4480 - val_Duration_loss: 0.5570 - val_Class_loss: 0.4740 - val_Bandwidth_accuracy: 0.8200 - val_Duration_accuracy: 0.7600 - val_Class_accuracy: 0.8133\n",
      "Epoch 19/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.1522 - Bandwidth_loss: 0.2548 - Duration_loss: 0.3869 - Class_loss: 1.5105 - Bandwidth_accuracy: 0.8940 - Duration_accuracy: 0.8132 - Class_accuracy: 0.3145 - val_loss: 1.3764 - val_Bandwidth_loss: 0.4104 - val_Duration_loss: 0.5226 - val_Class_loss: 0.4434 - val_Bandwidth_accuracy: 0.8467 - val_Duration_accuracy: 0.7867 - val_Class_accuracy: 0.8800\n",
      "Epoch 20/20\n",
      "96/96 [==============================] - 0s 2ms/step - loss: 2.1500 - Bandwidth_loss: 0.2585 - Duration_loss: 0.3817 - Class_loss: 1.5097 - Bandwidth_accuracy: 0.8972 - Duration_accuracy: 0.8154 - Class_accuracy: 0.3145 - val_loss: 1.5489 - val_Bandwidth_loss: 0.4630 - val_Duration_loss: 0.5902 - val_Class_loss: 0.4957 - val_Bandwidth_accuracy: 0.8333 - val_Duration_accuracy: 0.7667 - val_Class_accuracy: 0.8400\n",
      "5/5 [==============================] - 0s 1ms/step - loss: 1.3708 - Bandwidth_loss: 0.3276 - Duration_loss: 0.6816 - Class_loss: 0.3616 - Bandwidth_accuracy: 0.9067 - Duration_accuracy: 0.7000 - Class_accuracy: 0.9267\n",
      "[1.3707976341247559, 0.32755497097969055, 0.6816233396530151, 0.36161941289901733, 0.9066666960716248, 0.699999988079071, 0.9266666769981384]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "# Use tensorflow.keras consistently: mixing the standalone `keras` package\n",
    "# with `tensorflow.keras` layers in one model can raise incompatible-tensor\n",
    "# errors, so Flatten is imported from tensorflow.keras as well.\n",
    "from tensorflow.keras.layers import Input, Dense, Multiply, Reshape, Flatten\n",
    "from tensorflow.keras.models import Model\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras import backend as K\n",
    "\n",
    "timestep = 60\n",
    "np.random.seed(10)  # reproducibility\n",
    "\n",
    "num_class = 5\n",
    "train_sample_per_class = 20  # number of samples per class left unmasked for the Class head\n",
    "lambda_value = 1             # weight of the Class loss in the total loss\n",
    "\n",
    "\n",
    "# ---- Training data -------------------------------------------------------\n",
    "trainData = np.load(\"trainData.npy\")\n",
    "trainlabel = np.load(\"trainLabel.npy\")\n",
    "# Keep only the first timestep*2 columns of each row.\n",
    "trainData = trainData[:, :timestep*2]\n",
    "trainlabel = trainlabel[:, :timestep*2]\n",
    "trainlabel = trainlabel.astype(int)\n",
    "\n",
    "# Per-sample mask multiplied into the Class-head features downstream\n",
    "# (row of ones = keep, row of zeros = suppress). Width 256 matches the\n",
    "# shared 256-unit feature layer.\n",
    "trainmask = np.zeros((trainlabel.shape[0],256))\n",
    "\n",
    "# Unmask at most train_sample_per_class samples for each class id.\n",
    "class_counter = np.zeros((num_class))\n",
    "train_size = trainlabel.shape[0]\n",
    "for i in range(train_size):\n",
    "    class_id = trainlabel[i,2] - 1\n",
    "    if class_counter[class_id] < train_sample_per_class:\n",
    "        trainmask[i, :] = 1\n",
    "        class_counter[class_id] += 1\n",
    "print(\"unmasked samples: \", str(np.sum(trainmask==1)/256))\n",
    "\n",
    "\n",
    "# ---- Validation data (fully unmasked) ------------------------------------\n",
    "valData = np.load(\"valData.npy\")\n",
    "valLabel = np.load(\"valLabel.npy\")\n",
    "valData = valData[:, :timestep*2]\n",
    "valLabel = valLabel[:, :timestep*2]\n",
    "valLabel = valLabel.astype(int)\n",
    "valmask = np.ones((valLabel.shape[0], 256))\n",
    "\n",
    "\n",
    "# ---- Test data (fully unmasked) ------------------------------------------\n",
    "testData = np.load(\"testData.npy\")\n",
    "testLabel = np.load(\"testLabel.npy\")\n",
    "testData = testData[:, :timestep*2]\n",
    "testLabel = testLabel[:, :timestep*2]\n",
    "testLabel = testLabel.astype(int)\n",
    "testmask = np.ones((testLabel.shape[0], 256))\n",
    "\n",
    "def categorize_labels(labels):\n",
    "    \"\"\"Bucket labels in place: column 0 (bandwidth) into categories 1-5,\n",
    "    column 1 (duration) into categories 1-4. Column 2 (class id) is left\n",
    "    untouched. Same thresholds as were previously copy-pasted for the\n",
    "    train/val/test arrays.\"\"\"\n",
    "    for i in range(labels.shape[0]):\n",
    "        # Categorizing Bandwidth\n",
    "        if labels[i, 0] < 10000:\n",
    "            labels[i, 0] = 1\n",
    "        elif labels[i, 0] < 50000:\n",
    "            labels[i, 0] = 2\n",
    "        elif labels[i, 0] < 100000:\n",
    "            labels[i, 0] = 3\n",
    "        elif labels[i, 0] < 1000000:\n",
    "            labels[i, 0] = 4\n",
    "        else:\n",
    "            labels[i, 0] = 5\n",
    "        # Categorizing Duration\n",
    "        if labels[i, 1] < 10:\n",
    "            labels[i, 1] = 1\n",
    "        elif labels[i, 1] < 30:\n",
    "            labels[i, 1] = 2\n",
    "        elif labels[i, 1] < 60:\n",
    "            labels[i, 1] = 3\n",
    "        else:\n",
    "            labels[i, 1] = 4\n",
    "\n",
    "categorize_labels(trainlabel)\n",
    "categorize_labels(valLabel)\n",
    "categorize_labels(testLabel)\n",
    "\n",
    "\n",
    "def to_one_hot(values, num_categories):\n",
    "    \"\"\"Return a (len(values), num_categories) one-hot matrix for 1-based labels.\"\"\"\n",
    "    size = values.shape[0]\n",
    "    onehot = np.zeros((size, num_categories))\n",
    "    onehot[np.arange(size), values - 1] = 1\n",
    "    return onehot\n",
    "\n",
    "# column 0 = Bandwidth (5 bins), column 1 = Duration (4 bins), column 2 = Class (5 ids)\n",
    "Y_train1 = to_one_hot(trainlabel[:,0], 5)\n",
    "Y_train2 = to_one_hot(trainlabel[:,1], 4)\n",
    "Y_train3 = to_one_hot(trainlabel[:,2], 5)\n",
    "\n",
    "Y_val1 = to_one_hot(valLabel[:,0], 5)\n",
    "Y_val2 = to_one_hot(valLabel[:,1], 4)\n",
    "Y_val3 = to_one_hot(valLabel[:,2], 5)\n",
    "\n",
    "Y_test1 = to_one_hot(testLabel[:,0], 5)\n",
    "Y_test2 = to_one_hot(testLabel[:,1], 4)\n",
    "Y_test3 = to_one_hot(testLabel[:,2], 5)\n",
    "\n",
    "# Reshape flat (N, timestep*2) feature rows into (N, timestep, 2) sequences.\n",
    "trainData = trainData.reshape((trainData.shape[0], timestep, 2))\n",
    "testData = testData.reshape((testData.shape[0], timestep, 2))\n",
    "valData = valData.reshape((valData.shape[0], timestep, 2))\n",
    "\n",
    "def base_model():\n",
    "    \"\"\"Build the multi-task classifier on a dense encoder.\n",
    "\n",
    "    The encoder compresses each flattened (timestep, 2) sequence through\n",
    "    timestep*2 -> 128 -> 64 -> 32 units; three softmax heads (Bandwidth: 5,\n",
    "    Duration: 4, Class: 5) sit on a shared 256-unit trunk, and the Class\n",
    "    head's features are multiplied by the 256-wide mask input so features\n",
    "    of masked samples are zeroed for it.\n",
    "\n",
    "    NOTE(review): the previous version also built `autoencoder`, `encoder`\n",
    "    and `decoder` Models that were never trained or used, and applied a\n",
    "    no-op Flatten to the already-flat 32-d code; that dead code is removed.\n",
    "    \"\"\"\n",
    "    input_shape = (timestep, 2)\n",
    "\n",
    "    model_input = Input(shape=input_shape)\n",
    "    mask_input = Input(shape=(256,))\n",
    "\n",
    "    # Encoder\n",
    "    x = Flatten()(model_input)\n",
    "    x = Dense(128, activation='relu')(x)\n",
    "    x = Dense(64, activation='relu')(x)\n",
    "    encoded = Dense(32, activation='relu')(x)\n",
    "\n",
    "    # Shared fully-connected trunk on the 32-d code\n",
    "    x = Dense(256, activation='relu')(encoded)\n",
    "    x = Dense(256, activation='relu')(x)\n",
    "\n",
    "    output1 = Dense(5, activation='softmax', name='Bandwidth')(x)\n",
    "    output2 = Dense(4, activation='softmax', name='Duration')(x)\n",
    "\n",
    "    # Gate the shared features by the per-sample mask before the Class head.\n",
    "    x3 = Multiply()([x, mask_input])\n",
    "    output3 = Dense(5, activation='softmax', name='Class')(x3)\n",
    "\n",
    "    model = Model(inputs=[model_input, mask_input], outputs=[output1, output2, output3])\n",
    "    \n",
    "    opt = Adam(clipnorm = 1.)  # clip gradient norm for training stability\n",
    "    model.compile(loss=['categorical_crossentropy', 'categorical_crossentropy', 'categorical_crossentropy'], \n",
    "                  loss_weights=[1, 1, lambda_value], optimizer=opt, metrics=['accuracy'])\n",
    "    return model\n",
    "\n",
    "model = base_model()\n",
    "\n",
    "# NOTE(review): unlike the transformer cell, no EarlyStopping callback is\n",
    "# used here, so training always runs the full 20 epochs — confirm intended.\n",
    "model.fit([trainData, trainmask], [Y_train1, Y_train2, Y_train3],\n",
    "          validation_data=([valData, valmask], [Y_val1, Y_val2, Y_val3]),\n",
    "          batch_size=64, epochs=20, verbose=True, shuffle=True)\n",
    "\n",
    "# Evaluate on the held-out test set (test masks are all ones).\n",
    "result = model.evaluate([testData, testmask], [Y_test1, Y_test2, Y_test3])\n",
    "print(result)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.5"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
