{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'2.3.0'"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Standard-library / third-party imports; DM_process_v1 is a project-local module.\n",
     "import os\n",
     "import json\n",
     "import codecs\n",
     "import numpy as np\n",
     "import tensorflow as tf\n",
     "import yaml\n",
     "import DM_process_v1 as DM_process\n",
     "\n",
     "# Pin training to GPU 1 and enable incremental GPU memory allocation\n",
     "# so TF does not grab the whole card up front.\n",
     "os.environ[\"CUDA_VISIBLE_DEVICES\"] = '1'\n",
     "gpus = tf.config.experimental.list_physical_devices(device_type='GPU')\n",
     "for gpu in gpus:\n",
     "    tf.config.experimental.set_memory_growth(gpu, True)\n",
     "# Last expression: display the TF version (2.3.0 per the recorded output).\n",
     "tf.__version__"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "params = {\n",
    "    'batch_size':16,\n",
    "    'lr' : 0.001,\n",
    "    'epochs': 160,\n",
    "    'drops' : [0.1]\n",
    "         }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Load the vocabulary dictionaries (action/intent/entity <-> id maps).\n",
     "with open('./DM_char.json', mode='r', encoding='utf-8') as f:\n",
     "    dicts = json.load(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "path = '../stories/all_stories.yml'\n",
    "with open(path, 'r', encoding='utf-8') as f:\n",
    "    dataset = yaml.load(f.read(),Loader=yaml.Loader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Forward (token -> id) and inverse (id -> token) vocabularies.\n",
     "# NOTE(review): the JSON uses 'entities' where the code says 'slots'.\n",
     "action2id = dicts['action2id']\n",
     "intent2id = dicts['intent2id']\n",
     "slots2id = dicts['entities2id']\n",
     "id2action = dicts['id2action']\n",
     "id2intent = dicts['id2intent']\n",
     "id2slots = dicts['id2entities']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "previous_action_len = len(action2id)\n",
    "# print(previous_action_len)\n",
    "slots_len = len(slots2id)\n",
    "# print(slots_len)\n",
    "user_intent_len = len(intent2id)\n",
    "# print(user_intent_len)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Split the raw stories into conversation turns (see DM_process_v1).\n",
     "data_set = DM_process.split_data(dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Vectorize each turn into (previous_action, slots, user_intent) features\n",
     "# and the target next action, using the id maps loaded above.\n",
     "previous_action, slots, user_intent, action = DM_process.extract_conv_data(data_set,action2id,slots2id,intent2id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "def Dataset(previous_action_inputs, slots_inputs,user_intent_inputs,pre_action):\n",
    "    dataset = tf.data.Dataset.from_tensor_slices(({\n",
    "    \"previous_action_inputs\" : previous_action_inputs,\n",
    "    \"slots_inputs\" : slots_inputs,\n",
    "    \"user_intent_inputs\" : user_intent_inputs\n",
    "    },\n",
    "    {\n",
    "        \"pre_action\" : pre_action\n",
    "    }))\n",
    "    data_count = len(slots_inputs)\n",
    "    dataset = dataset.shuffle(buffer_size = data_count)\n",
    "    dataset = dataset.batch(params['batch_size'])\n",
    "    return dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "# Wrap the vectorized turns in a tf.data pipeline.\n",
     "# NOTE(review): the (commented-out) fit call below passes the raw arrays\n",
     "# instead of this dataset -- confirm which input path is intended.\n",
     "train_dataset =  Dataset(previous_action, slots, user_intent, action)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"functional_1\"\n",
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "previous_action_inputs (InputLa [(None, 35)]         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "slots_inputs (InputLayer)       [(None, 13)]         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "user_intent_inputs (InputLayer) [(None, 43)]         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "embedding (Embedding)           (None, 35, 64)       8192        previous_action_inputs[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "embedding_1 (Embedding)         (None, 13, 64)       8192        slots_inputs[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "embedding_2 (Embedding)         (None, 43, 64)       8192        user_intent_inputs[0][0]         \n",
      "__________________________________________________________________________________________________\n",
      "concatenate (Concatenate)       (None, 91, 64)       0           embedding[0][0]                  \n",
      "                                                                 embedding_1[0][0]                \n",
      "                                                                 embedding_2[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "bidirectional (Bidirectional)   (None, 91, 256)      148992      concatenate[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "layer_normalization (LayerNorma (None, 91, 256)      512         bidirectional[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "global_average_pooling1d (Globa (None, 256)          0           layer_normalization[0][0]        \n",
      "__________________________________________________________________________________________________\n",
      "pre_action (Dense)              (None, 35)           8995        global_average_pooling1d[0][0]   \n",
      "==================================================================================================\n",
      "Total params: 183,075\n",
      "Trainable params: 183,075\n",
      "Non-trainable params: 0\n",
      "__________________________________________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "tf.keras.backend.clear_session()\n",
    "previous_action_inputs = tf.keras.layers.Input(shape=(previous_action_len,), name = 'previous_action_inputs')\n",
    "slots_inputs = tf.keras.layers.Input(shape = (slots_len,), name = 'slots_inputs')\n",
    "user_intent_inputs = tf.keras.layers.Input(shape = (user_intent_len,), name = 'user_intent_inputs')\n",
    "\n",
    "previous_action_embed = tf.keras.layers.Embedding(128,64)(previous_action_inputs)\n",
    "slots_embed = tf.keras.layers.Embedding(128,64)(slots_inputs)\n",
    "user_intent_embed = tf.keras.layers.Embedding(128,64)(user_intent_inputs)\n",
    "\n",
    "utter_inputs = tf.keras.layers.concatenate([previous_action_embed,slots_embed,user_intent_embed],axis=1)\n",
    "bilstm = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(128,return_sequences=True))(utter_inputs)\n",
    "x_in = tf.keras.layers.LayerNormalization()(bilstm)\n",
    "x_conv = tf.keras.layers.GlobalAveragePooling1D()(x_in)\n",
    "pre_action = tf.keras.layers.Dense(previous_action_len, activation='softmax',name = 'pre_action')(x_conv)\n",
    "model = tf.keras.Model([previous_action_inputs,slots_inputs,user_intent_inputs],pre_action)\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Compile with per-output loss/metric dicts keyed by the output layer name.\n",
     "# categorical_crossentropy matches the softmax 'pre_action' head; it assumes\n",
     "# one-hot targets (use sparse_categorical_crossentropy for integer labels).\n",
     "losses = {'pre_action': 'categorical_crossentropy'}\n",
     "metrics = {'pre_action': ['accuracy']}\n",
     "optimizer = tf.keras.optimizers.Adam(params['lr'])\n",
     "model.compile(optimizer, loss=losses, metrics=metrics)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "file_path = '../DM_model_weight/DM_weight.h5'\n",
    "checkpoint = tf.keras.callbacks.ModelCheckpoint(file_path,\n",
    "                                                        save_weights_only=False, save_best_only=True)\n",
    "learning_rate_reduction = tf.keras.callbacks.ReduceLROnPlateau(patience=50, factor=0.5)\n",
    "callbacks_list = [checkpoint,learning_rate_reduction]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/300\n",
      "24/27 [=========================>....] - ETA: 0s - loss: 1.3364 - accuracy: 0.8594WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 10ms/step - loss: 1.2796 - accuracy: 0.8578\n",
      "Epoch 2/300\n",
      "25/27 [==========================>...] - ETA: 0s - loss: 1.2269 - accuracy: 0.8450WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 9ms/step - loss: 1.1730 - accuracy: 0.8531\n",
      "Epoch 3/300\n",
      "25/27 [==========================>...] - ETA: 0s - loss: 1.1456 - accuracy: 0.8550WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 9ms/step - loss: 1.0927 - accuracy: 0.8626\n",
      "Epoch 4/300\n",
      "25/27 [==========================>...] - ETA: 0s - loss: 1.0340 - accuracy: 0.8650WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 9ms/step - loss: 1.0315 - accuracy: 0.8673\n",
      "Epoch 5/300\n",
      "25/27 [==========================>...] - ETA: 0s - loss: 0.8920 - accuracy: 0.8800WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 9ms/step - loss: 0.9779 - accuracy: 0.8720\n",
      "Epoch 6/300\n",
      "25/27 [==========================>...] - ETA: 0s - loss: 0.9528 - accuracy: 0.8750WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 9ms/step - loss: 0.9286 - accuracy: 0.8768\n",
      "Epoch 7/300\n",
      "25/27 [==========================>...] - ETA: 0s - loss: 0.9266 - accuracy: 0.8650WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 9ms/step - loss: 0.8894 - accuracy: 0.8673\n",
      "Epoch 8/300\n",
      "24/27 [=========================>....] - ETA: 0s - loss: 0.9190 - accuracy: 0.8594WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 10ms/step - loss: 0.8579 - accuracy: 0.8673\n",
      "Epoch 9/300\n",
      "26/27 [===========================>..] - ETA: 0s - loss: 0.8104 - accuracy: 0.8798WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 11ms/step - loss: 0.8156 - accuracy: 0.8768\n",
      "Epoch 10/300\n",
      "26/27 [===========================>..] - ETA: 0s - loss: 0.7934 - accuracy: 0.8798WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 11ms/step - loss: 0.7837 - accuracy: 0.8815\n",
      "Epoch 11/300\n",
      "23/27 [========================>.....] - ETA: 0s - loss: 0.6996 - accuracy: 0.8804WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 9ms/step - loss: 0.7503 - accuracy: 0.8815\n",
      "Epoch 12/300\n",
      "26/27 [===========================>..] - ETA: 0s - loss: 0.7418 - accuracy: 0.8702WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 8ms/step - loss: 0.7315 - accuracy: 0.8720\n",
      "Epoch 13/300\n",
      "25/27 [==========================>...] - ETA: 0s - loss: 0.6452 - accuracy: 0.8850WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 9ms/step - loss: 0.6988 - accuracy: 0.8768\n",
      "Epoch 14/300\n",
      "25/27 [==========================>...] - ETA: 0s - loss: 0.6795 - accuracy: 0.8750WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 9ms/step - loss: 0.6739 - accuracy: 0.8768\n",
      "Epoch 15/300\n",
      "27/27 [==============================] - ETA: 0s - loss: 0.6475 - accuracy: 0.8768WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 10ms/step - loss: 0.6475 - accuracy: 0.8768\n",
      "Epoch 16/300\n",
      "22/27 [=======================>......] - ETA: 0s - loss: 0.6454 - accuracy: 0.8864WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 9ms/step - loss: 0.6319 - accuracy: 0.8768\n",
      "Epoch 17/300\n",
      "25/27 [==========================>...] - ETA: 0s - loss: 0.6380 - accuracy: 0.8700WARNING:tensorflow:Can save best model only with val_loss available, skipping.\n",
      "WARNING:tensorflow:Reduce LR on plateau conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy,lr\n",
      "27/27 [==============================] - 0s 9ms/step - loss: 0.6202 - accuracy: 0.8768\n",
      "Epoch 18/300\n",
      "19/27 [====================>.........] - ETA: 0s - loss: 0.5925 - accuracy: 0.8882"
     ]
    }
   ],
   "source": [
     "# NOTE(review): disabled earlier fit call; its stale output above shows 300\n",
     "# epochs (params['epochs'] is 160) and it feeds raw arrays with batch_size=8,\n",
     "# bypassing train_dataset. Consider deleting this cell or clearing its output.\n",
     "# model.fit(x=[previous_action, slots, user_intent],y=action,batch_size=8,epochs=params['epochs'],callbacks=callbacks_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 3.2468 - accuracy: 0.1517\n",
      "Epoch 2/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 3.1332 - accuracy: 0.1517\n",
      "Epoch 3/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 3.1104 - accuracy: 0.1564\n",
      "Epoch 4/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 3.0622 - accuracy: 0.1896\n",
      "Epoch 5/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 3.0584 - accuracy: 0.1896\n",
      "Epoch 6/160\n",
      "14/14 [==============================] - 0s 12ms/step - loss: 3.0616 - accuracy: 0.1896\n",
      "Epoch 7/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 3.0612 - accuracy: 0.1469\n",
      "Epoch 8/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 3.0504 - accuracy: 0.1896\n",
      "Epoch 9/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 3.0190 - accuracy: 0.1374\n",
      "Epoch 10/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.9946 - accuracy: 0.1896\n",
      "Epoch 11/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 3.0130 - accuracy: 0.1374\n",
      "Epoch 12/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.9903 - accuracy: 0.1896\n",
      "Epoch 13/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.9650 - accuracy: 0.1896\n",
      "Epoch 14/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.9704 - accuracy: 0.2038\n",
      "Epoch 15/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 2.9280 - accuracy: 0.1943\n",
      "Epoch 16/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 2.8102 - accuracy: 0.2417\n",
      "Epoch 17/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.7775 - accuracy: 0.2417\n",
      "Epoch 18/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.7736 - accuracy: 0.2464\n",
      "Epoch 19/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 2.7864 - accuracy: 0.2085\n",
      "Epoch 20/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.7089 - accuracy: 0.2654\n",
      "Epoch 21/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.6446 - accuracy: 0.2796\n",
      "Epoch 22/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.6074 - accuracy: 0.2654\n",
      "Epoch 23/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.5654 - accuracy: 0.3033\n",
      "Epoch 24/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.5525 - accuracy: 0.2796\n",
      "Epoch 25/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 2.4642 - accuracy: 0.2607\n",
      "Epoch 26/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.5410 - accuracy: 0.2607\n",
      "Epoch 27/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.4641 - accuracy: 0.2986\n",
      "Epoch 28/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.3953 - accuracy: 0.3270\n",
      "Epoch 29/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.3427 - accuracy: 0.2512\n",
      "Epoch 30/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.2846 - accuracy: 0.3318\n",
      "Epoch 31/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.2821 - accuracy: 0.3365\n",
      "Epoch 32/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.2859 - accuracy: 0.3270\n",
      "Epoch 33/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.1961 - accuracy: 0.3365\n",
      "Epoch 34/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.1122 - accuracy: 0.3460\n",
      "Epoch 35/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.1760 - accuracy: 0.3365\n",
      "Epoch 36/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.1704 - accuracy: 0.3318\n",
      "Epoch 37/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.0626 - accuracy: 0.3744\n",
      "Epoch 38/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.0575 - accuracy: 0.3934\n",
      "Epoch 39/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.9144 - accuracy: 0.4123\n",
      "Epoch 40/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.8640 - accuracy: 0.4408\n",
      "Epoch 41/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.7763 - accuracy: 0.4597\n",
      "Epoch 42/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.7899 - accuracy: 0.4502\n",
      "Epoch 43/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.7017 - accuracy: 0.4645\n",
      "Epoch 44/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.5602 - accuracy: 0.5355\n",
      "Epoch 45/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.4894 - accuracy: 0.5450\n",
      "Epoch 46/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.4112 - accuracy: 0.5687\n",
      "Epoch 47/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.4195 - accuracy: 0.5592\n",
      "Epoch 48/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.4064 - accuracy: 0.5545\n",
      "Epoch 49/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.3467 - accuracy: 0.5924\n",
      "Epoch 50/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.4219 - accuracy: 0.5355\n",
      "Epoch 51/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.4210 - accuracy: 0.5261\n",
      "Epoch 52/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.3474 - accuracy: 0.5545\n",
      "Epoch 53/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.3614 - accuracy: 0.5450\n",
      "Epoch 54/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.3203 - accuracy: 0.5592\n",
      "Epoch 55/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.2399 - accuracy: 0.5829\n",
      "Epoch 56/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.2520 - accuracy: 0.5924\n",
      "Epoch 57/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.2230 - accuracy: 0.5735\n",
      "Epoch 58/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.3246 - accuracy: 0.5687\n",
      "Epoch 59/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.2191 - accuracy: 0.5972\n",
      "Epoch 60/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.1803 - accuracy: 0.5829\n",
      "Epoch 61/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.0715 - accuracy: 0.6303\n",
      "Epoch 62/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.0919 - accuracy: 0.6398\n",
      "Epoch 63/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.0877 - accuracy: 0.6351\n",
      "Epoch 64/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.1278 - accuracy: 0.6066\n",
      "Epoch 65/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.1324 - accuracy: 0.6256\n",
      "Epoch 66/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.0781 - accuracy: 0.6019\n",
      "Epoch 67/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.9858 - accuracy: 0.6825\n",
      "Epoch 68/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.0199 - accuracy: 0.6114\n",
      "Epoch 69/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.9702 - accuracy: 0.6588\n",
      "Epoch 70/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.9661 - accuracy: 0.6825\n",
      "Epoch 71/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.0335 - accuracy: 0.6303\n",
      "Epoch 72/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.0152 - accuracy: 0.6256\n",
      "Epoch 73/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.0299 - accuracy: 0.6256\n",
      "Epoch 74/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.9303 - accuracy: 0.6872\n",
      "Epoch 75/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.9139 - accuracy: 0.6872\n",
      "Epoch 76/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.8736 - accuracy: 0.7156\n",
      "Epoch 77/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.8761 - accuracy: 0.7204\n",
      "Epoch 78/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.8591 - accuracy: 0.7062\n",
      "Epoch 79/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.8816 - accuracy: 0.7204\n",
      "Epoch 80/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.7851 - accuracy: 0.7441\n",
      "Epoch 81/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.7471 - accuracy: 0.7299\n",
      "Epoch 82/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.7672 - accuracy: 0.7156\n",
      "Epoch 83/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.7736 - accuracy: 0.7536\n",
      "Epoch 84/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.8007 - accuracy: 0.7204\n",
      "Epoch 85/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.8115 - accuracy: 0.7156\n",
      "Epoch 86/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.7818 - accuracy: 0.7251\n",
      "Epoch 87/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.6718 - accuracy: 0.7630\n",
      "Epoch 88/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.6365 - accuracy: 0.7725\n",
      "Epoch 89/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.6711 - accuracy: 0.7678\n",
      "Epoch 90/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.6951 - accuracy: 0.7441\n",
      "Epoch 91/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.9867 - accuracy: 0.6730\n",
      "Epoch 92/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.1477 - accuracy: 0.6303\n",
      "Epoch 93/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.0262 - accuracy: 0.6445\n",
      "Epoch 94/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.9262 - accuracy: 0.6825\n",
      "Epoch 95/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.8622 - accuracy: 0.7062\n",
      "Epoch 96/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.7558 - accuracy: 0.7346\n",
      "Epoch 97/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.7190 - accuracy: 0.7630\n",
      "Epoch 98/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.6582 - accuracy: 0.7725\n",
      "Epoch 99/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.6046 - accuracy: 0.7867\n",
      "Epoch 100/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.5727 - accuracy: 0.8009\n",
      "Epoch 101/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.5484 - accuracy: 0.8152\n",
      "Epoch 102/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.6305 - accuracy: 0.7393\n",
      "Epoch 103/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.6523 - accuracy: 0.8152\n",
      "Epoch 104/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.6646 - accuracy: 0.7630\n",
      "Epoch 105/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.6710 - accuracy: 0.7820\n",
      "Epoch 106/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.5630 - accuracy: 0.8199\n",
      "Epoch 107/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.5214 - accuracy: 0.8057\n",
      "Epoch 108/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.5022 - accuracy: 0.8104\n",
      "Epoch 109/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.4548 - accuracy: 0.8531\n",
      "Epoch 110/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.4432 - accuracy: 0.8341\n",
      "Epoch 111/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.4728 - accuracy: 0.8341\n",
      "Epoch 112/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.4511 - accuracy: 0.8720\n",
      "Epoch 113/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.4329 - accuracy: 0.8436\n",
      "Epoch 114/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.4835 - accuracy: 0.8341\n",
      "Epoch 115/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.4994 - accuracy: 0.8294\n",
      "Epoch 116/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.5086 - accuracy: 0.8246\n",
      "Epoch 117/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.4306 - accuracy: 0.8626\n",
      "Epoch 118/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.5302 - accuracy: 0.8294\n",
      "Epoch 119/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.3953 - accuracy: 0.8626\n",
      "Epoch 120/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.4138 - accuracy: 0.8815\n",
      "Epoch 121/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.5055 - accuracy: 0.7867\n",
      "Epoch 122/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.5182 - accuracy: 0.8057\n",
      "Epoch 123/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.4104 - accuracy: 0.8531\n",
      "Epoch 124/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.3205 - accuracy: 0.8910\n",
      "Epoch 125/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.3124 - accuracy: 0.9147\n",
      "Epoch 126/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.2991 - accuracy: 0.9384\n",
      "Epoch 127/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.2824 - accuracy: 0.9289\n",
      "Epoch 128/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.2584 - accuracy: 0.9431\n",
      "Epoch 129/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.2696 - accuracy: 0.9384\n",
      "Epoch 130/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.3696 - accuracy: 0.8863\n",
      "Epoch 131/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.3426 - accuracy: 0.8957\n",
      "Epoch 132/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.4165 - accuracy: 0.8578\n",
      "Epoch 133/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.3489 - accuracy: 0.8863\n",
      "Epoch 134/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.3561 - accuracy: 0.8863\n",
      "Epoch 135/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.2923 - accuracy: 0.9242\n",
      "Epoch 136/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.2816 - accuracy: 0.9052\n",
      "Epoch 137/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.2935 - accuracy: 0.8957\n",
      "Epoch 138/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.2681 - accuracy: 0.9147\n",
      "Epoch 139/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.2933 - accuracy: 0.9147\n",
      "Epoch 140/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.2350 - accuracy: 0.9336\n",
      "Epoch 141/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.2147 - accuracy: 0.9384\n",
      "Epoch 142/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.2200 - accuracy: 0.9336\n",
      "Epoch 143/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.2057 - accuracy: 0.9479\n",
      "Epoch 144/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.2090 - accuracy: 0.9526\n",
      "Epoch 145/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.2267 - accuracy: 0.9289\n",
      "Epoch 146/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.2103 - accuracy: 0.9336\n",
      "Epoch 147/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.2140 - accuracy: 0.9479\n",
      "Epoch 148/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.3155 - accuracy: 0.8720\n",
      "Epoch 149/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.2258 - accuracy: 0.9289\n",
      "Epoch 150/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.2105 - accuracy: 0.9384\n",
      "Epoch 151/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.1762 - accuracy: 0.9479\n",
      "Epoch 152/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.1790 - accuracy: 0.9431\n",
      "Epoch 153/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.1764 - accuracy: 0.9526\n",
      "Epoch 154/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.2192 - accuracy: 0.9289\n",
      "Epoch 155/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.1985 - accuracy: 0.9384\n",
      "Epoch 156/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.1795 - accuracy: 0.9384\n",
      "Epoch 157/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.1684 - accuracy: 0.9431\n",
      "Epoch 158/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.1458 - accuracy: 0.9526\n",
      "Epoch 159/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.1476 - accuracy: 0.9479\n",
      "Epoch 160/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.1394 - accuracy: 0.9621\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<tensorflow.python.keras.callbacks.History at 0x7f49aead3110>"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Train the dialogue-policy model on the prepared tf.data pipeline for the\n",
    "# number of epochs configured in the `params` dict (160 per the cell above).\n",
    "model.fit(train_dataset,epochs=params['epochs'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Weights-only export, currently disabled; kept for reference.\n",
    "# NOTE(review): dead commented-out code -- consider removing if the full-model\n",
    "# save in the next cell is the canonical export path.\n",
    "# model.save_weights('../DM_model_weight/DM_weight_629.h5')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the trained model via tf.keras Model.save (saves architecture,\n",
    "# weights, and optimizer state).\n",
    "# NOTE(review): `file_path` is defined in an earlier cell not shown here --\n",
    "# confirm it points at the intended export location and format (SavedModel\n",
    "# directory vs. .h5 file is chosen by the path suffix).\n",
    "model.save(file_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
