{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'2.3.0'"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Standard library\n",
    "import os\n",
    "import json\n",
    "import codecs  # NOTE(review): appears unused anywhere in this notebook - confirm before removing\n",
    "\n",
    "# Third-party\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import yaml\n",
    "\n",
    "# Local helper module: data splitting / conversation feature extraction\n",
    "import DM_process_v1 as DM_process\n",
    "\n",
    "# Pin training to GPU 1 (must be set before TF initializes devices) and\n",
    "# enable memory growth so TF does not grab the whole card up front.\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = '1'\n",
    "gpus = tf.config.experimental.list_physical_devices(device_type='GPU')\n",
    "for gpu in gpus:\n",
    "    tf.config.experimental.set_memory_growth(gpu, True)\n",
    "tf.__version__"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training hyper-parameters, collected in one place for easy tuning.\n",
    "params = {\n",
    "    'batch_size': 16,  # samples per gradient step\n",
    "    'lr': 0.001,       # initial Adam learning rate\n",
    "    'epochs': 160,     # full passes over the training stories\n",
    "    'drops': [0.1]     # dropout rates - NOTE(review): not referenced below\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Vocabulary file: action / intent / entity <-> id lookup tables.\n",
    "with open('./DM_char.json', mode='r', encoding='utf-8') as f:\n",
    "    dicts = json.load(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training stories (dialogue flows) in YAML form.\n",
    "path = '../stories/all_stories.yml'\n",
    "with open(path, 'r', encoding='utf-8') as f:\n",
    "    # NOTE(review): yaml.Loader can construct arbitrary Python objects from\n",
    "    # tagged YAML; prefer yaml.safe_load for plain data files unless custom\n",
    "    # tags are actually required here.\n",
    "    dataset = yaml.load(f.read(),Loader=yaml.Loader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Forward (name -> id) and inverse (id -> name) lookup tables.\n",
    "action2id = dicts['action2id']\n",
    "intent2id = dicts['intent2id']\n",
    "slots2id = dicts['entities2id']\n",
    "id2action = dicts['id2action']\n",
    "id2intent = dicts['id2intent']\n",
    "id2slots = dicts['id2entities']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Vocabulary sizes - these fix the model's input vector lengths\n",
    "# (35 actions, 13 slots, 42 intents per the model summary printed below).\n",
    "previous_action_len = len(action2id)\n",
    "slots_len = len(slots2id)\n",
    "user_intent_len = len(intent2id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): split_data lives in DM_process_v1 (not shown here); presumably\n",
    "# it partitions the YAML stories into individual training conversations.\n",
    "data_set = DM_process.split_data(dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encode the conversations into training arrays via the id tables.\n",
    "# NOTE(review): the exact feature encoding (multi-hot vs. id sequence) is\n",
    "# defined in DM_process_v1 - confirm there before relying on it.\n",
    "previous_action, slots, user_intent, action = DM_process.extract_conv_data(data_set,action2id,slots2id,intent2id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "def Dataset(previous_action_inputs, slots_inputs, user_intent_inputs, pre_action):\n",
    "    \"\"\"Wrap feature/label arrays into a shuffled, batched tf.data.Dataset.\n",
    "\n",
    "    Feature dict keys must match the model's Input layer names; the label\n",
    "    key must match the output layer name ('pre_action').\n",
    "    \"\"\"\n",
    "    features = {\n",
    "        \"previous_action_inputs\": previous_action_inputs,\n",
    "        \"slots_inputs\": slots_inputs,\n",
    "        \"user_intent_inputs\": user_intent_inputs,\n",
    "    }\n",
    "    labels = {\"pre_action\": pre_action}\n",
    "    ds = tf.data.Dataset.from_tensor_slices((features, labels))\n",
    "    # Shuffle over the full sample count, then batch.\n",
    "    ds = ds.shuffle(buffer_size=len(slots_inputs))\n",
    "    return ds.batch(params['batch_size'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Build the shuffled, batched training pipeline.\n",
    "train_dataset =  Dataset(previous_action, slots, user_intent, action)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"functional_1\"\n",
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "previous_action_inputs (InputLa [(None, 35)]         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "slots_inputs (InputLayer)       [(None, 13)]         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "user_intent_inputs (InputLayer) [(None, 42)]         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "embedding (Embedding)           (None, 35, 64)       8192        previous_action_inputs[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "embedding_1 (Embedding)         (None, 13, 64)       8192        slots_inputs[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "embedding_2 (Embedding)         (None, 42, 64)       8192        user_intent_inputs[0][0]         \n",
      "__________________________________________________________________________________________________\n",
      "concatenate (Concatenate)       (None, 90, 64)       0           embedding[0][0]                  \n",
      "                                                                 embedding_1[0][0]                \n",
      "                                                                 embedding_2[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "bidirectional (Bidirectional)   (None, 90, 256)      148992      concatenate[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "layer_normalization (LayerNorma (None, 90, 256)      512         bidirectional[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "global_average_pooling1d (Globa (None, 256)          0           layer_normalization[0][0]        \n",
      "__________________________________________________________________________________________________\n",
      "pre_action (Dense)              (None, 35)           8995        global_average_pooling1d[0][0]   \n",
      "==================================================================================================\n",
      "Total params: 183,075\n",
      "Trainable params: 183,075\n",
      "Non-trainable params: 0\n",
      "__________________________________________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "# Dialogue-policy network: three fixed-length input vectors are embedded,\n",
    "# concatenated along the sequence axis, encoded with a bidirectional GRU and\n",
    "# average-pooled into a softmax over the action vocabulary.\n",
    "tf.keras.backend.clear_session()\n",
    "previous_action_inputs = tf.keras.layers.Input(shape=(previous_action_len,), name = 'previous_action_inputs')\n",
    "slots_inputs = tf.keras.layers.Input(shape = (slots_len,), name = 'slots_inputs')\n",
    "user_intent_inputs = tf.keras.layers.Input(shape = (user_intent_len,), name = 'user_intent_inputs')\n",
    "\n",
    "# NOTE(review): Embedding treats input values as integer indices; input_dim=128\n",
    "# is a magic upper bound - confirm all encoded values stay below 128.\n",
    "previous_action_embed = tf.keras.layers.Embedding(128,64)(previous_action_inputs)\n",
    "slots_embed = tf.keras.layers.Embedding(128,64)(slots_inputs)\n",
    "user_intent_embed = tf.keras.layers.Embedding(128,64)(user_intent_inputs)\n",
    "\n",
    "# Concatenate on axis 1: (None, 35+13+42, 64) = (None, 90, 64) per the summary.\n",
    "utter_inputs = tf.keras.layers.concatenate([previous_action_embed,slots_embed,user_intent_embed],axis=1)\n",
    "bilstm = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(128,return_sequences=True))(utter_inputs)  # NOTE(review): name says lstm but the cell is a GRU\n",
    "x_in = tf.keras.layers.LayerNormalization()(bilstm)\n",
    "x_conv = tf.keras.layers.GlobalAveragePooling1D()(x_in)\n",
    "pre_action = tf.keras.layers.Dense(previous_action_len, activation='softmax',name = 'pre_action')(x_conv)\n",
    "model = tf.keras.Model([previous_action_inputs,slots_inputs,user_intent_inputs],pre_action)\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Single-head categorical classification over the next system action;\n",
    "# keys must match the output layer name 'pre_action'.\n",
    "losses = {'pre_action': 'categorical_crossentropy'}\n",
    "metrics = {'pre_action': ['accuracy']}\n",
    "optimizer = tf.keras.optimizers.Adam(params['lr'])\n",
    "model.compile(optimizer, loss=losses, metrics=metrics)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "file_path = '../DM_model_weight/DM_weight.h5'\n",
    "# Save the best full model during training. monitor='loss' is required here:\n",
    "# training in this notebook runs without validation data, so the Keras default\n",
    "# monitor 'val_loss' would never exist and save_best_only would never fire.\n",
    "checkpoint = tf.keras.callbacks.ModelCheckpoint(file_path, monitor='loss',\n",
    "                                                save_weights_only=False, save_best_only=True)\n",
    "# Halve the learning rate once training loss plateaus for 50 epochs\n",
    "# (same reasoning: default 'val_loss' is unavailable without validation data).\n",
    "learning_rate_reduction = tf.keras.callbacks.ReduceLROnPlateau(monitor='loss', patience=50, factor=0.5)\n",
    "callbacks_list = [checkpoint, learning_rate_reduction]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Alternative: train directly on the raw arrays instead of the tf.data pipeline.\n",
    "# model.fit(x=[previous_action, slots, user_intent],y=action,batch_size=8,epochs=params['epochs'],callbacks=callbacks_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/160\n",
      "14/14 [==============================] - 0s 12ms/step - loss: 3.2718 - accuracy: 0.1577\n",
      "Epoch 2/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 3.1055 - accuracy: 0.1622\n",
      "Epoch 3/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 3.1257 - accuracy: 0.1577\n",
      "Epoch 4/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 3.0822 - accuracy: 0.1847\n",
      "Epoch 5/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 3.0617 - accuracy: 0.1847\n",
      "Epoch 6/160\n",
      "14/14 [==============================] - 0s 15ms/step - loss: 3.0667 - accuracy: 0.1847\n",
      "Epoch 7/160\n",
      "14/14 [==============================] - 0s 18ms/step - loss: 3.0440 - accuracy: 0.1757\n",
      "Epoch 8/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 3.0486 - accuracy: 0.1577\n",
      "Epoch 9/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 3.0415 - accuracy: 0.1712\n",
      "Epoch 10/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 3.0182 - accuracy: 0.1712\n",
      "Epoch 11/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 3.0086 - accuracy: 0.1847\n",
      "Epoch 12/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 2.9913 - accuracy: 0.1847\n",
      "Epoch 13/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 2.9599 - accuracy: 0.1802\n",
      "Epoch 14/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.8691 - accuracy: 0.2027\n",
      "Epoch 15/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.8330 - accuracy: 0.2207\n",
      "Epoch 16/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 2.8015 - accuracy: 0.2027\n",
      "Epoch 17/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 2.8112 - accuracy: 0.1937\n",
      "Epoch 18/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.7095 - accuracy: 0.2523\n",
      "Epoch 19/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.6896 - accuracy: 0.2297\n",
      "Epoch 20/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.7407 - accuracy: 0.1982\n",
      "Epoch 21/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.7481 - accuracy: 0.2342\n",
      "Epoch 22/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.6708 - accuracy: 0.2477\n",
      "Epoch 23/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 2.6563 - accuracy: 0.2613\n",
      "Epoch 24/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 2.6195 - accuracy: 0.2207\n",
      "Epoch 25/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.6026 - accuracy: 0.2523\n",
      "Epoch 26/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.5949 - accuracy: 0.2613\n",
      "Epoch 27/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 2.5871 - accuracy: 0.2477\n",
      "Epoch 28/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.5183 - accuracy: 0.2658\n",
      "Epoch 29/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.5068 - accuracy: 0.2613\n",
      "Epoch 30/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.4840 - accuracy: 0.2838\n",
      "Epoch 31/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.4748 - accuracy: 0.2703\n",
      "Epoch 32/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 2.4952 - accuracy: 0.2613\n",
      "Epoch 33/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.4776 - accuracy: 0.2523\n",
      "Epoch 34/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.4166 - accuracy: 0.3063\n",
      "Epoch 35/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.4066 - accuracy: 0.3108\n",
      "Epoch 36/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.3502 - accuracy: 0.3018\n",
      "Epoch 37/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.3820 - accuracy: 0.3018\n",
      "Epoch 38/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.3312 - accuracy: 0.2748\n",
      "Epoch 39/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.2603 - accuracy: 0.3423\n",
      "Epoch 40/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 2.3069 - accuracy: 0.2928\n",
      "Epoch 41/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 2.2968 - accuracy: 0.2883\n",
      "Epoch 42/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.2221 - accuracy: 0.3063\n",
      "Epoch 43/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 2.2006 - accuracy: 0.3198\n",
      "Epoch 44/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.1157 - accuracy: 0.3919\n",
      "Epoch 45/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.0956 - accuracy: 0.3919\n",
      "Epoch 46/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 2.1373 - accuracy: 0.4414\n",
      "Epoch 47/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 2.0740 - accuracy: 0.3829\n",
      "Epoch 48/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 2.0419 - accuracy: 0.4279\n",
      "Epoch 49/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.9761 - accuracy: 0.4595\n",
      "Epoch 50/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.9034 - accuracy: 0.4550\n",
      "Epoch 51/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.8643 - accuracy: 0.4595\n",
      "Epoch 52/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.8201 - accuracy: 0.4505\n",
      "Epoch 53/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.7856 - accuracy: 0.5000\n",
      "Epoch 54/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.7927 - accuracy: 0.4955\n",
      "Epoch 55/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.7572 - accuracy: 0.5045\n",
      "Epoch 56/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.6612 - accuracy: 0.5270\n",
      "Epoch 57/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.6208 - accuracy: 0.5090\n",
      "Epoch 58/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.5778 - accuracy: 0.5450\n",
      "Epoch 59/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.5043 - accuracy: 0.5541\n",
      "Epoch 60/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.4708 - accuracy: 0.5405\n",
      "Epoch 61/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.4505 - accuracy: 0.5631\n",
      "Epoch 62/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.4117 - accuracy: 0.5315\n",
      "Epoch 63/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.4595 - accuracy: 0.5495\n",
      "Epoch 64/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.3712 - accuracy: 0.5495\n",
      "Epoch 65/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.2613 - accuracy: 0.5991\n",
      "Epoch 66/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.2003 - accuracy: 0.6306\n",
      "Epoch 67/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.1489 - accuracy: 0.6306\n",
      "Epoch 68/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.1009 - accuracy: 0.6351\n",
      "Epoch 69/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.1445 - accuracy: 0.6261\n",
      "Epoch 70/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.4763 - accuracy: 0.5405\n",
      "Epoch 71/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.3438 - accuracy: 0.5360\n",
      "Epoch 72/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.1208 - accuracy: 0.6171\n",
      "Epoch 73/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 1.0641 - accuracy: 0.6712\n",
      "Epoch 74/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 1.0267 - accuracy: 0.6802\n",
      "Epoch 75/160\n",
      "14/14 [==============================] - 0s 8ms/step - loss: 0.9960 - accuracy: 0.6757\n",
      "Epoch 76/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.9602 - accuracy: 0.6712\n",
      "Epoch 77/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.9425 - accuracy: 0.6892\n",
      "Epoch 78/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.9331 - accuracy: 0.6982\n",
      "Epoch 79/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.9586 - accuracy: 0.6802\n",
      "Epoch 80/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.9262 - accuracy: 0.6982\n",
      "Epoch 81/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.9401 - accuracy: 0.7027\n",
      "Epoch 82/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.8860 - accuracy: 0.7117\n",
      "Epoch 83/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.8792 - accuracy: 0.7027\n",
      "Epoch 84/160\n",
      "14/14 [==============================] - 0s 9ms/step - loss: 0.8612 - accuracy: 0.6937\n",
      "Epoch 85/160\n",
      "14/14 [==============================] - 0s 15ms/step - loss: 1.2308 - accuracy: 0.6126\n",
      "Epoch 86/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 1.7490 - accuracy: 0.4730\n",
      "Epoch 87/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 1.5474 - accuracy: 0.5315\n",
      "Epoch 88/160\n",
      "14/14 [==============================] - 0s 16ms/step - loss: 1.2408 - accuracy: 0.6486\n",
      "Epoch 89/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 1.1062 - accuracy: 0.6802\n",
      "Epoch 90/160\n",
      "14/14 [==============================] - 0s 15ms/step - loss: 1.0875 - accuracy: 0.6441\n",
      "Epoch 91/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 1.0020 - accuracy: 0.6622\n",
      "Epoch 92/160\n",
      "14/14 [==============================] - 0s 17ms/step - loss: 0.9257 - accuracy: 0.7072\n",
      "Epoch 93/160\n",
      "14/14 [==============================] - 0s 15ms/step - loss: 0.8793 - accuracy: 0.7117\n",
      "Epoch 94/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.8372 - accuracy: 0.7162\n",
      "Epoch 95/160\n",
      "14/14 [==============================] - 0s 18ms/step - loss: 0.8149 - accuracy: 0.7432\n",
      "Epoch 96/160\n",
      "14/14 [==============================] - 0s 19ms/step - loss: 0.9402 - accuracy: 0.6937\n",
      "Epoch 97/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 1.0046 - accuracy: 0.6802\n",
      "Epoch 98/160\n",
      "14/14 [==============================] - 0s 12ms/step - loss: 1.1540 - accuracy: 0.6126\n",
      "Epoch 99/160\n",
      "14/14 [==============================] - 0s 12ms/step - loss: 0.9661 - accuracy: 0.6622\n",
      "Epoch 100/160\n",
      "14/14 [==============================] - 0s 15ms/step - loss: 0.9502 - accuracy: 0.6847\n",
      "Epoch 101/160\n",
      "14/14 [==============================] - 0s 15ms/step - loss: 0.8714 - accuracy: 0.6982\n",
      "Epoch 102/160\n",
      "14/14 [==============================] - 0s 15ms/step - loss: 0.7701 - accuracy: 0.7117\n",
      "Epoch 103/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 0.7274 - accuracy: 0.7523\n",
      "Epoch 104/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.7256 - accuracy: 0.7703\n",
      "Epoch 105/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 0.6818 - accuracy: 0.7838\n",
      "Epoch 106/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.6651 - accuracy: 0.7658\n",
      "Epoch 107/160\n",
      "14/14 [==============================] - 0s 15ms/step - loss: 0.6473 - accuracy: 0.7973\n",
      "Epoch 108/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.6473 - accuracy: 0.7613\n",
      "Epoch 109/160\n",
      "14/14 [==============================] - 0s 12ms/step - loss: 0.6264 - accuracy: 0.8153\n",
      "Epoch 110/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.6108 - accuracy: 0.7883\n",
      "Epoch 111/160\n",
      "14/14 [==============================] - 0s 19ms/step - loss: 0.5865 - accuracy: 0.7793\n",
      "Epoch 112/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.5661 - accuracy: 0.8153\n",
      "Epoch 113/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 0.5814 - accuracy: 0.7838\n",
      "Epoch 114/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 0.5663 - accuracy: 0.8063\n",
      "Epoch 115/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 0.6110 - accuracy: 0.7793\n",
      "Epoch 116/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.5881 - accuracy: 0.7973\n",
      "Epoch 117/160\n",
      "14/14 [==============================] - 0s 12ms/step - loss: 0.5991 - accuracy: 0.7928\n",
      "Epoch 118/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.5683 - accuracy: 0.7928\n",
      "Epoch 119/160\n",
      "14/14 [==============================] - 0s 12ms/step - loss: 0.5378 - accuracy: 0.7883\n",
      "Epoch 120/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.5142 - accuracy: 0.8153\n",
      "Epoch 121/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 0.5590 - accuracy: 0.8153\n",
      "Epoch 122/160\n",
      "14/14 [==============================] - 0s 16ms/step - loss: 0.5254 - accuracy: 0.8153\n",
      "Epoch 123/160\n",
      "14/14 [==============================] - 0s 12ms/step - loss: 0.5206 - accuracy: 0.8063\n",
      "Epoch 124/160\n",
      "14/14 [==============================] - 0s 15ms/step - loss: 0.5301 - accuracy: 0.7658\n",
      "Epoch 125/160\n",
      "14/14 [==============================] - 0s 19ms/step - loss: 0.5066 - accuracy: 0.8243\n",
      "Epoch 126/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.5180 - accuracy: 0.7973\n",
      "Epoch 127/160\n",
      "14/14 [==============================] - 0s 12ms/step - loss: 0.4722 - accuracy: 0.8018\n",
      "Epoch 128/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 0.4723 - accuracy: 0.8063\n",
      "Epoch 129/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 0.4972 - accuracy: 0.8288\n",
      "Epoch 130/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 0.4311 - accuracy: 0.8423\n",
      "Epoch 131/160\n",
      "14/14 [==============================] - 0s 19ms/step - loss: 0.4408 - accuracy: 0.8468\n",
      "Epoch 132/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 0.4281 - accuracy: 0.8514\n",
      "Epoch 133/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 0.4340 - accuracy: 0.8333\n",
      "Epoch 134/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 0.4470 - accuracy: 0.8243\n",
      "Epoch 135/160\n",
      "14/14 [==============================] - 0s 15ms/step - loss: 0.4631 - accuracy: 0.8243\n",
      "Epoch 136/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 0.4408 - accuracy: 0.8378\n",
      "Epoch 137/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.4433 - accuracy: 0.8153\n",
      "Epoch 138/160\n",
      "14/14 [==============================] - 0s 11ms/step - loss: 0.4235 - accuracy: 0.8333\n",
      "Epoch 139/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.4262 - accuracy: 0.8333\n",
      "Epoch 140/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.4145 - accuracy: 0.8288\n",
      "Epoch 141/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.4158 - accuracy: 0.8514\n",
      "Epoch 142/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 0.4231 - accuracy: 0.8288\n",
      "Epoch 143/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 0.4099 - accuracy: 0.8063\n",
      "Epoch 144/160\n",
      "14/14 [==============================] - 0s 16ms/step - loss: 0.3979 - accuracy: 0.8288\n",
      "Epoch 145/160\n",
      "14/14 [==============================] - 0s 17ms/step - loss: 0.3924 - accuracy: 0.8288\n",
      "Epoch 146/160\n",
      "14/14 [==============================] - 0s 16ms/step - loss: 0.4235 - accuracy: 0.8423\n",
      "Epoch 147/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.3850 - accuracy: 0.8378\n",
      "Epoch 148/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.3835 - accuracy: 0.8649\n",
      "Epoch 149/160\n",
      "14/14 [==============================] - 0s 16ms/step - loss: 0.3685 - accuracy: 0.8559\n",
      "Epoch 150/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 0.3733 - accuracy: 0.8468\n",
      "Epoch 151/160\n",
      "14/14 [==============================] - 0s 14ms/step - loss: 0.3688 - accuracy: 0.8468\n",
      "Epoch 152/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.3727 - accuracy: 0.8559\n",
      "Epoch 153/160\n",
      "14/14 [==============================] - 0s 20ms/step - loss: 0.3835 - accuracy: 0.8243\n",
      "Epoch 154/160\n",
      "14/14 [==============================] - 0s 12ms/step - loss: 0.3701 - accuracy: 0.8378\n",
      "Epoch 155/160\n",
      "14/14 [==============================] - 0s 13ms/step - loss: 0.3382 - accuracy: 0.8694\n",
      "Epoch 156/160\n",
      "14/14 [==============================] - 0s 12ms/step - loss: 0.3403 - accuracy: 0.8694\n",
      "Epoch 157/160\n",
      "14/14 [==============================] - 0s 10ms/step - loss: 0.3395 - accuracy: 0.8739\n",
      "Epoch 158/160\n",
      "14/14 [==============================] - 0s 19ms/step - loss: 0.3437 - accuracy: 0.8694\n",
      "Epoch 159/160\n",
      "14/14 [==============================] - 0s 19ms/step - loss: 0.3576 - accuracy: 0.8514\n",
      "Epoch 160/160\n",
      "14/14 [==============================] - 0s 17ms/step - loss: 0.3988 - accuracy: 0.8378\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<tensorflow.python.keras.callbacks.History at 0x7fe49163a410>"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Train on the batched dataset, passing the callbacks built above - in the\n",
    "# original cell callbacks_list was created but never used, so checkpointing\n",
    "# and LR reduction were dead code during training.\n",
    "model.fit(train_dataset, epochs=params['epochs'], callbacks=callbacks_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Alternative: persist only the weights (smaller file; reloading requires\n",
    "# rebuilding the architecture in code first).\n",
    "# model.save_weights('../DM_model_weight/DM_weight_629.h5')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the full model (architecture + weights + optimizer state) to file_path.\n",
    "model.save(file_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
