{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from utils import *\n",
    "from itertools import product\n",
    "from keras import Model\n",
    "import tensorflow as tf\n",
    "import tensorflow.keras\n",
    "from tensorflow.keras.metrics import RootMeanSquaredError\n",
    "from tensorflow.keras.optimizers import Adam,SGD,Adagrad\n",
    "from keras.layers import Dense,Conv1D,Conv2D,Flatten,MaxPool1D,LeakyReLU,MaxPooling1D,BatchNormalization,Input,LSTM,normalization,ReLU,Add\n",
    "from keras.activations import sigmoid\n",
    "import keras\n",
    "from tensorflow.keras.experimental import WideDeepModel,LinearModel\n",
    "from keras.models import Sequential\n",
    "from keras.layers.merge import concatenate\n",
    "from keras import backend as K"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def cross_transformation(X, C):\n",
    "    \"\"\"Append one multiplicative feature cross to X.\n",
    "\n",
    "    The new column is prod_j X[:, j] ** C[j] over the first len(C)\n",
    "    columns, so a 0/1 exponent vector C selects which features are\n",
    "    multiplied together.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    X : ndarray, shape (n_samples, n_features)\n",
    "    C : sequence of exponents; only the first len(C) columns are used\n",
    "        (the original version hard-coded len(C) == 7).\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    ndarray, shape (n_samples, n_features + 1)\n",
    "    \"\"\"\n",
    "    C = np.asarray(C)\n",
    "    # Vectorised form of the former per-column loop; keepdims yields the\n",
    "    # (n_samples, 1) shape that the explicit reshape used to produce.\n",
    "    phi = np.prod(X[:, :len(C)] ** C, axis=1, keepdims=True)\n",
    "    return np.concatenate((X, phi), axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the raw train/test split; scaling is done manually in a later cell.\n",
    "# NOTE(review): import_dataset comes from the wildcard `utils` import.\n",
    "X_train,X_test,Y_train,Y_test,scaler = import_dataset(normalised = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate every 0/1 exponent mask over the 7 base features and append the\n",
    "# corresponding multiplicative cross as a new column (2**7 = 128 extra\n",
    "# columns in total).\n",
    "# NOTE(review): the all-zeros mask adds a constant column of ones, and each\n",
    "# single-one mask duplicates an existing base column -- confirm intended.\n",
    "lst = list(product([0, 1], repeat=7))\n",
    "for i in range(len(lst)):\n",
    "    X_train = cross_transformation(X_train,lst[i])\n",
    "    X_test = cross_transformation(X_test,lst[i])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "scaler = MinMaxScaler(feature_range=(0,1))\n",
    "scaler1 = MinMaxScaler(feature_range=(0,1))\n",
    "# Fit the scalers on the training split only, then apply the SAME transform\n",
    "# to the test split. The original code called fit_transform on the test\n",
    "# data, which re-fits the scalers on test statistics (data leakage) and\n",
    "# puts train and test on inconsistent scales.\n",
    "X_train = scaler.fit_transform(X_train)\n",
    "X_test = scaler.transform(X_test)\n",
    "Y_train = scaler1.fit_transform(Y_train)\n",
    "Y_test = scaler1.transform(Y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "750/750 [==============================] - 3s 3ms/step - loss: 0.0117 - val_loss: 0.0037\n",
      "Epoch 2/10\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0014 - val_loss: 9.9156e-04\n",
      "Epoch 3/10\n",
      "750/750 [==============================] - 2s 2ms/step - loss: 3.8910e-04 - val_loss: 5.5756e-04\n",
      "Epoch 4/10\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 2.7227e-04 - val_loss: 4.4972e-04\n",
      "Epoch 5/10\n",
      "750/750 [==============================] - 2s 2ms/step - loss: 2.0119e-04 - val_loss: 3.9553e-04\n",
      "Epoch 6/10\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 1.5072e-04 - val_loss: 3.3408e-04\n",
      "Epoch 7/10\n",
      "750/750 [==============================] - 2s 2ms/step - loss: 1.1557e-04 - val_loss: 3.1452e-04: 0s - loss: 1.194\n",
      "Epoch 8/10\n",
      "750/750 [==============================] - 2s 2ms/step - loss: 8.5844e-05 - val_loss: 2.5259e-04\n",
      "Epoch 9/10\n",
      "750/750 [==============================] - 2s 2ms/step - loss: 6.2914e-05 - val_loss: 2.2730e-04\n",
      "Epoch 10/10\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 4.8779e-05 - val_loss: 2.1443e-04\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x204c33273d0>"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# --- Autoencoder 1: compress the engineered feature vector ---\n",
    "encoding_dim = 4  # NOTE(review): unused -- the bottleneck below uses n_inputs/2\n",
    "n_inputs = X_train.shape[1]\n",
    "visible = Input(shape=(n_inputs,))\n",
    "# encoder: Dense -> sigmoid\n",
    "e = Dense(n_inputs)(visible)\n",
    "#e = BatchNormalization()(e)\n",
    "e = sigmoid(e)\n",
    "# define bottleneck (half the input width)\n",
    "n_bottleneck = int(n_inputs/2)\n",
    "bottleneck = Dense(n_bottleneck)(e)\n",
    "# define decoder\n",
    "d = Dense(n_inputs)(bottleneck)\n",
    "#d = BatchNormalization()(d)\n",
    "d = sigmoid(d)\n",
    "# output layer\n",
    "output = Dense(n_inputs, activation='linear')(d)\n",
    "# define autoencoder model (trained to reconstruct its own input)\n",
    "model = Model(inputs=visible, outputs=output)\n",
    "model.compile(optimizer='adam', loss='mse')\n",
    "model.fit(X_train, X_train,\n",
    "                epochs=10,\n",
    "                batch_size=64,\n",
    "                shuffle=True,\n",
    "                validation_data=(X_test, X_test))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Keep only the encoder half: map inputs to the bottleneck codes, which\n",
    "# become the (reduced-dimension) data for the next stacked autoencoder.\n",
    "encoder = Model(inputs=visible, outputs=bottleneck)\n",
    "X_test_D = encoder.predict(X_test)\n",
    "X_train_D = encoder.predict(X_train)\n",
    "# scaler_en = MinMaxScaler(feature_range=(0,1))\n",
    "# X_train_D = scaler_en.fit_transform(X_train_D)\n",
    "# X_test_D = scaler_en.transform(X_test_D)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "750/750 [==============================] - 2s 2ms/step - loss: 0.0270 - val_loss: 0.0057\n",
      "Epoch 2/10\n",
      "750/750 [==============================] - 1s 2ms/step - loss: 0.0019 - val_loss: 0.0017\n",
      "Epoch 3/10\n",
      "750/750 [==============================] - 1s 2ms/step - loss: 8.7740e-04 - val_loss: 0.0011\n",
      "Epoch 4/10\n",
      "750/750 [==============================] - 1s 2ms/step - loss: 5.4178e-04 - val_loss: 7.5895e-04\n",
      "Epoch 5/10\n",
      "750/750 [==============================] - 1s 2ms/step - loss: 3.6144e-04 - val_loss: 5.4923e-04\n",
      "Epoch 6/10\n",
      "750/750 [==============================] - 1s 2ms/step - loss: 2.4941e-04 - val_loss: 4.1810e-04\n",
      "Epoch 7/10\n",
      "750/750 [==============================] - 1s 2ms/step - loss: 1.7294e-04 - val_loss: 2.9609e-04\n",
      "Epoch 8/10\n",
      "750/750 [==============================] - 1s 2ms/step - loss: 1.2510e-04 - val_loss: 2.3966e-04\n",
      "Epoch 9/10\n",
      "750/750 [==============================] - 1s 2ms/step - loss: 9.6292e-05 - val_loss: 2.0695e-04\n",
      "Epoch 10/10\n",
      "750/750 [==============================] - 1s 2ms/step - loss: 7.8687e-05 - val_loss: 1.7558e-04\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x204c3c1cdc0>"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# --- Autoencoder 2: compress the level-1 codes one step further ---\n",
    "encoding_dim = 4  # NOTE(review): unused -- the bottleneck uses n_inputs/2\n",
    "n_inputs = X_train_D.shape[1]\n",
    "visible2 = Input(shape=(n_inputs,))\n",
    "# Bug fix: build the encoder on this cell's own Input layer. The original\n",
    "# wired Dense to `bottleneck` (a tensor from the FIRST autoencoder's graph)\n",
    "# and passed the intermediate tensor `e` as the model input, leaving\n",
    "# `visible2` unused.\n",
    "e = Dense(n_inputs)(visible2)\n",
    "#e = BatchNormalization()(e)\n",
    "e = sigmoid(e)\n",
    "# define bottleneck\n",
    "n_bottleneck = int(n_inputs/2)\n",
    "bottleneck2 = Dense(n_bottleneck)(e)\n",
    "# define decoder\n",
    "d = Dense(n_inputs)(bottleneck2)\n",
    "#d = BatchNormalization()(d)\n",
    "d = sigmoid(d)\n",
    "# output layer\n",
    "output = Dense(n_inputs, activation='linear')(d)\n",
    "# define autoencoder model\n",
    "model = Model(inputs=visible2, outputs=output)\n",
    "model.compile(optimizer='adam', loss='mse')\n",
    "model.fit(X_train_D, X_train_D,\n",
    "                epochs=10,\n",
    "                batch_size=64,\n",
    "                shuffle=True,\n",
    "                validation_data=(X_test_D, X_test_D))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encoder half of autoencoder 2: replace the data with its level-2 codes.\n",
    "# NOTE(review): `inputs=e` passes an intermediate (sigmoid) tensor rather\n",
    "# than an Input layer; it evidently ran in this Keras version, but the model\n",
    "# input should be the Input layer of autoencoder 2's graph -- confirm.\n",
    "encoder2 = Model(inputs=e, outputs=bottleneck2)\n",
    "X_test_D = encoder2.predict(X_test_D)\n",
    "X_train_D = encoder2.predict(X_train_D)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "750/750 [==============================] - 1s 1ms/step - loss: 0.1580 - val_loss: 0.0547\n",
      "Epoch 2/10\n",
      "750/750 [==============================] - 1s 1ms/step - loss: 0.0244 - val_loss: 0.0244\n",
      "Epoch 3/10\n",
      "750/750 [==============================] - 1s 1ms/step - loss: 0.0094 - val_loss: 0.0105\n",
      "Epoch 4/10\n",
      "750/750 [==============================] - 1s 1ms/step - loss: 0.0043 - val_loss: 0.0062\n",
      "Epoch 5/10\n",
      "750/750 [==============================] - 1s 1ms/step - loss: 0.0027 - val_loss: 0.0042\n",
      "Epoch 6/10\n",
      "750/750 [==============================] - 1s 1ms/step - loss: 0.0019 - val_loss: 0.0032\n",
      "Epoch 7/10\n",
      "750/750 [==============================] - 1s 1ms/step - loss: 0.0014 - val_loss: 0.0024\n",
      "Epoch 8/10\n",
      "750/750 [==============================] - 1s 1ms/step - loss: 0.0011 - val_loss: 0.0019\n",
      "Epoch 9/10\n",
      "750/750 [==============================] - 1s 1ms/step - loss: 8.3328e-04 - val_loss: 0.0016\n",
      "Epoch 10/10\n",
      "750/750 [==============================] - 1s 1ms/step - loss: 6.7479e-04 - val_loss: 0.0013\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x2278c4b3d60>"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# --- Autoencoder 3: compress the level-2 codes one step further ---\n",
    "encoding_dim = 4  # NOTE(review): unused -- the bottleneck uses n_inputs/2\n",
    "n_inputs = X_train_D.shape[1]\n",
    "visible3 = Input(shape=(n_inputs,))\n",
    "# Bug fix: build the encoder on this cell's own Input layer. The original\n",
    "# wired Dense to `bottleneck2` (a tensor from the SECOND autoencoder's\n",
    "# graph) and passed the intermediate tensor `e` as the model input,\n",
    "# leaving `visible3` unused.\n",
    "e = Dense(n_inputs)(visible3)\n",
    "#e = BatchNormalization()(e)\n",
    "e = sigmoid(e)\n",
    "# define bottleneck\n",
    "n_bottleneck = int(n_inputs/2)\n",
    "bottleneck3 = Dense(n_bottleneck)(e)\n",
    "# define decoder\n",
    "d = Dense(n_inputs)(bottleneck3)\n",
    "#d = BatchNormalization()(d)\n",
    "d = sigmoid(d)\n",
    "# output layer\n",
    "output = Dense(n_inputs, activation='linear')(d)\n",
    "# define autoencoder model\n",
    "model = Model(inputs=visible3, outputs=output)\n",
    "model.compile(optimizer='adam', loss='mse')\n",
    "model.fit(X_train_D, X_train_D,\n",
    "                epochs=10,\n",
    "                batch_size=64,\n",
    "                shuffle=True,\n",
    "                validation_data=(X_test_D, X_test_D))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add a singleton timestep axis: (samples, features) -> (samples, 1, features).\n",
    "# NOTE(review): only needed for the LSTM/Conv1D experiments that are\n",
    "# commented out below; the Dense model does not use these arrays.\n",
    "X_trainD_D = X_train_D.reshape(X_train_D.shape[0],1,X_train_D.shape[1])\n",
    "X_testD_D = X_test_D.reshape(X_test_D.shape[0],1,X_test_D.shape[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inverse model: predict the engineered feature vector X from the target Y.\n",
    "# (Commented-out LSTM/Conv1D experiments and stale Dense variants removed.)\n",
    "input_layer = Input(shape=(Y_train.shape[-1],))\n",
    "# Residual block 1: width 64, skip connection from L5 around L6/L7.\n",
    "L5 = Dense(units=64,activation='relu',kernel_initializer='uniform')(input_layer)\n",
    "L6 = Dense(units=64,activation='relu',kernel_initializer='uniform')(L5)\n",
    "L7 = Dense(units=64,activation='relu',kernel_initializer='uniform')(L6)\n",
    "L8 = Add()([L5,L7])\n",
    "# Residual block 2: width 128, skip connection from L9 around L10/L11.\n",
    "L9 = Dense(units=128,activation='relu',kernel_initializer='uniform')(L8)\n",
    "L10 = Dense(units=128,activation='relu',kernel_initializer='uniform')(L9)\n",
    "L11 = Dense(units=128,activation='relu',kernel_initializer='uniform')(L10)\n",
    "L12 = Add()([L9,L11])\n",
    "# Sigmoid head matches the [0, 1] MinMax-scaled feature targets.\n",
    "L17 = Dense(units=X_train.shape[1],activation='sigmoid')(L12)\n",
    "dnn_model = tensorflow.keras.Model(inputs=input_layer, outputs=L17)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/100\n",
      "750/750 [==============================] - 3s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0435 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0445\n",
      "Epoch 2/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0439 - val_loss: 0.0027 - val_root_mean_squared_error: 0.0516\n",
      "Epoch 3/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0020 - root_mean_squared_error: 0.0444 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0459\n",
      "Epoch 4/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0441 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0456\n",
      "Epoch 5/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0436 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0443\n",
      "Epoch 6/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0437 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0451\n",
      "Epoch 7/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0433 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0453\n",
      "Epoch 8/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0436 - val_loss: 0.0025 - val_root_mean_squared_error: 0.0501\n",
      "Epoch 9/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0435 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0443\n",
      "Epoch 10/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0434 - val_loss: 0.0022 - val_root_mean_squared_error: 0.0464\n",
      "Epoch 11/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0432 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0433\n",
      "Epoch 12/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0433 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0433\n",
      "Epoch 13/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0434 - val_loss: 0.0022 - val_root_mean_squared_error: 0.0465\n",
      "Epoch 14/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0433 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0457\n",
      "Epoch 15/100\n",
      "750/750 [==============================] - 3s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0434 - val_loss: 0.0023 - val_root_mean_squared_error: 0.0483\n",
      "Epoch 16/100\n",
      "750/750 [==============================] - 3s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0426 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0441\n",
      "Epoch 17/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0429 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0443\n",
      "Epoch 18/100\n",
      "750/750 [==============================] - 3s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0427 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0449\n",
      "Epoch 19/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0430 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0460\n",
      "Epoch 20/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0430 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0443\n",
      "Epoch 21/100\n",
      "750/750 [==============================] - 3s 3ms/step - loss: 0.0019 - root_mean_squared_error: 0.0431 - val_loss: 0.0022 - val_root_mean_squared_error: 0.0470\n",
      "Epoch 22/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0425 - val_loss: 0.0024 - val_root_mean_squared_error: 0.0485\n",
      "Epoch 23/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0428 - val_loss: 0.0031 - val_root_mean_squared_error: 0.0561\n",
      "Epoch 24/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0428 - val_loss: 0.0026 - val_root_mean_squared_error: 0.0509\n",
      "Epoch 25/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0429 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0453: 0s - loss: 0.0018 - root_mean_squared_\n",
      "Epoch 26/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0425 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0431\n",
      "Epoch 27/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0426 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0448\n",
      "Epoch 28/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0428 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0462\n",
      "Epoch 29/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0426 - val_loss: 0.0023 - val_root_mean_squared_error: 0.0476\n",
      "Epoch 30/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0420 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0447\n",
      "Epoch 31/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0425 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0452\n",
      "Epoch 32/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0422 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0457\n",
      "Epoch 33/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0422 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0432\n",
      "Epoch 34/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0427 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0453\n",
      "Epoch 35/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0424 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0434\n",
      "Epoch 36/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0424 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0438\n",
      "Epoch 37/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0423 - val_loss: 0.0018 - val_root_mean_squared_error: 0.0426\n",
      "Epoch 38/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0421 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0463\n",
      "Epoch 39/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0018 - root_mean_squared_error: 0.0421 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0453\n",
      "Epoch 40/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0018 - root_mean_squared_error: 0.0421 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0432\n",
      "Epoch 41/100\n",
      "750/750 [==============================] - 6s 8ms/step - loss: 0.0018 - root_mean_squared_error: 0.0421 - val_loss: 0.0025 - val_root_mean_squared_error: 0.0496\n",
      "Epoch 42/100\n",
      "750/750 [==============================] - 4s 6ms/step - loss: 0.0018 - root_mean_squared_error: 0.0419 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0456\n",
      "Epoch 43/100\n",
      "750/750 [==============================] - 4s 5ms/step - loss: 0.0018 - root_mean_squared_error: 0.0419 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0432\n",
      "Epoch 44/100\n",
      "750/750 [==============================] - 4s 5ms/step - loss: 0.0018 - root_mean_squared_error: 0.0419 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0437\n",
      "Epoch 45/100\n",
      "750/750 [==============================] - 4s 5ms/step - loss: 0.0017 - root_mean_squared_error: 0.0418 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0433\n",
      "Epoch 46/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0018 - root_mean_squared_error: 0.0419 - val_loss: 0.0022 - val_root_mean_squared_error: 0.0469\n",
      "Epoch 47/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0018 - root_mean_squared_error: 0.0419 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0447\n",
      "Epoch 48/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0017 - root_mean_squared_error: 0.0418 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0432\n",
      "Epoch 49/100\n",
      "750/750 [==============================] - 4s 6ms/step - loss: 0.0018 - root_mean_squared_error: 0.0420 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0444\n",
      "Epoch 50/100\n",
      "750/750 [==============================] - 4s 5ms/step - loss: 0.0017 - root_mean_squared_error: 0.0415 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0433\n",
      "Epoch 51/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0017 - root_mean_squared_error: 0.0416 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0447\n",
      "Epoch 52/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0017 - root_mean_squared_error: 0.0416 - val_loss: 0.0024 - val_root_mean_squared_error: 0.0492\n",
      "Epoch 53/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0018 - root_mean_squared_error: 0.0420 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0451\n",
      "Epoch 54/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0018 - root_mean_squared_error: 0.0419 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0435\n",
      "Epoch 55/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0018 - root_mean_squared_error: 0.0419 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0431\n",
      "Epoch 56/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0415 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0434\n",
      "Epoch 57/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0412 - val_loss: 0.0022 - val_root_mean_squared_error: 0.0466\n",
      "Epoch 58/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0417 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0431\n",
      "Epoch 59/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0412 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0438\n",
      "Epoch 60/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0413 - val_loss: 0.0022 - val_root_mean_squared_error: 0.0469\n",
      "Epoch 61/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0414 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0431\n",
      "Epoch 62/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0415 - val_loss: 0.0018 - val_root_mean_squared_error: 0.0428\n",
      "Epoch 63/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0414 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0435\n",
      "Epoch 64/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0416 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0439\n",
      "Epoch 65/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0414 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0461\n",
      "Epoch 66/100\n",
      "750/750 [==============================] - 3s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0411 - val_loss: 0.0023 - val_root_mean_squared_error: 0.0477\n",
      "Epoch 67/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0017 - root_mean_squared_error: 0.0418 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0442\n",
      "Epoch 68/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0412 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0437\n",
      "Epoch 69/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0017 - root_mean_squared_error: 0.0411 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0435\n",
      "Epoch 70/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0412 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0460\n",
      "Epoch 71/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0410 - val_loss: 0.0022 - val_root_mean_squared_error: 0.0467\n",
      "Epoch 72/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0017 - root_mean_squared_error: 0.0409 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0436\n",
      "Epoch 73/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0410 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0434\n",
      "Epoch 74/100\n",
      "750/750 [==============================] - 3s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0413 - val_loss: 0.0018 - val_root_mean_squared_error: 0.0429\n",
      "Epoch 75/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0411 - val_loss: 0.0022 - val_root_mean_squared_error: 0.0465\n",
      "Epoch 76/100\n",
      "750/750 [==============================] - 3s 4ms/step - loss: 0.0017 - root_mean_squared_error: 0.0411 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0446\n",
      "Epoch 77/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0412 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0443\n",
      "Epoch 78/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0411 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0431\n",
      "Epoch 79/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0407 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0432\n",
      "Epoch 80/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0411 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0446\n",
      "Epoch 81/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0407 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0436\n",
      "Epoch 82/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0411 - val_loss: 0.0018 - val_root_mean_squared_error: 0.0427\n",
      "Epoch 83/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0414 - val_loss: 0.0034 - val_root_mean_squared_error: 0.0582\n",
      "Epoch 84/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0410 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0435\n",
      "Epoch 85/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0409 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0432\n",
      "Epoch 86/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0411 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0433\n",
      "Epoch 87/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0408 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0432\n",
      "Epoch 88/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0016 - root_mean_squared_error: 0.0406 - val_loss: 0.0021 - val_root_mean_squared_error: 0.0456\n",
      "Epoch 89/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0411 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0444\n",
      "Epoch 90/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0410 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0435\n",
      "Epoch 91/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0016 - root_mean_squared_error: 0.0406 - val_loss: 0.0018 - val_root_mean_squared_error: 0.0427\n",
      "Epoch 92/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0409 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0435\n",
      "Epoch 93/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0016 - root_mean_squared_error: 0.0406 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0450\n",
      "Epoch 94/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0016 - root_mean_squared_error: 0.0405 - val_loss: 0.0018 - val_root_mean_squared_error: 0.0428\n",
      "Epoch 95/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0016 - root_mean_squared_error: 0.0406 - val_loss: 0.0025 - val_root_mean_squared_error: 0.0497\n",
      "Epoch 96/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0016 - root_mean_squared_error: 0.0406 - val_loss: 0.0020 - val_root_mean_squared_error: 0.0445\n",
      "Epoch 97/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0016 - root_mean_squared_error: 0.0403 - val_loss: 0.0018 - val_root_mean_squared_error: 0.0430\n",
      "Epoch 98/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0016 - root_mean_squared_error: 0.0403 - val_loss: 0.0018 - val_root_mean_squared_error: 0.0430\n",
      "Epoch 99/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0017 - root_mean_squared_error: 0.0407 - val_loss: 0.0018 - val_root_mean_squared_error: 0.0429\n",
      "Epoch 100/100\n",
      "750/750 [==============================] - 2s 3ms/step - loss: 0.0016 - root_mean_squared_error: 0.0406 - val_loss: 0.0019 - val_root_mean_squared_error: 0.0439\n"
     ]
    }
   ],
   "source": [
    "# opt = Adam(lr=0.001)\n",
    "val = []\n",
    "lr_schedule = tensorflow.keras.optimizers.schedules.ExponentialDecay(\n",
    "    initial_learning_rate=0.1,\n",
    "    decay_steps=100000,\n",
    "    decay_rate=0.09)\n",
    "opt = SGD(learning_rate=0.01,momentum=0.4)\n",
    "dnn_model.compile('RMSprop','mse',metrics=RootMeanSquaredError())\n",
    "hist = dnn_model.fit(Y_train, X_train, 64,epochs=100,shuffle=True,validation_data=(Y_test, X_test))\n",
    "val.append(hist.history['loss'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "Y_pred = dnn_model.predict(Y_test)\n",
    "u = np.sum(np.square(X_test-Y_pred))\n",
    "v = np.sum(np.square(X_test - np.mean(X_test)))\n",
    "R = 1 - u/v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.9620188092638828"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "R"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"model\"\n",
      "__________________________________________________________________________________________________\n",
      " Layer (type)                   Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      " input_1 (InputLayer)           [(None, 7)]          0           []                               \n",
      "                                                                                                  \n",
      " dense (Dense)                  (None, 64)           512         ['input_1[0][0]']                \n",
      "                                                                                                  \n",
      " batch_normalization (BatchNorm  (None, 64)          256         ['dense[0][0]']                  \n",
      " alization)                                                                                       \n",
      "                                                                                                  \n",
      " dense_1 (Dense)                (None, 64)           4160        ['batch_normalization[0][0]']    \n",
      "                                                                                                  \n",
      " add (Add)                      (None, 64)           0           ['dense[0][0]',                  \n",
      "                                                                  'dense_1[0][0]']                \n",
      "                                                                                                  \n",
      " dense_2 (Dense)                (None, 64)           4160        ['add[0][0]']                    \n",
      "                                                                                                  \n",
      " batch_normalization_1 (BatchNo  (None, 64)          256         ['dense_2[0][0]']                \n",
      " rmalization)                                                                                     \n",
      "                                                                                                  \n",
      " dense_3 (Dense)                (None, 64)           4160        ['batch_normalization_1[0][0]']  \n",
      "                                                                                                  \n",
      " add_1 (Add)                    (None, 64)           0           ['dense_2[0][0]',                \n",
      "                                                                  'dense_3[0][0]']                \n",
      "                                                                                                  \n",
      " dense_4 (Dense)                (None, 4)            260         ['add_1[0][0]']                  \n",
      "                                                                                                  \n",
      "==================================================================================================\n",
      "Total params: 13,764\n",
      "Trainable params: 13,508\n",
      "Non-trainable params: 256\n",
      "__________________________________________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "dnn_model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/100\n",
      "891/891 [==============================] - 4s 3ms/step - loss: 0.4207 - root_mean_squared_error: 0.6486 - val_loss: 0.0511 - val_root_mean_squared_error: 0.2260\n",
      "Epoch 2/100\n",
      "891/891 [==============================] - 3s 3ms/step - loss: 0.0362 - root_mean_squared_error: 0.1903 - val_loss: 0.0405 - val_root_mean_squared_error: 0.2012\n",
      "Epoch 3/100\n",
      "891/891 [==============================] - 3s 3ms/step - loss: 0.0333 - root_mean_squared_error: 0.1824 - val_loss: 0.0401 - val_root_mean_squared_error: 0.2003\n",
      "Epoch 4/100\n",
      "891/891 [==============================] - 3s 3ms/step - loss: 0.0321 - root_mean_squared_error: 0.1792 - val_loss: 0.0360 - val_root_mean_squared_error: 0.1896\n",
      "Epoch 5/100\n",
      "891/891 [==============================] - 3s 3ms/step - loss: 0.0316 - root_mean_squared_error: 0.1777 - val_loss: 0.0404 - val_root_mean_squared_error: 0.2010\n",
      "Epoch 6/100\n",
      "891/891 [==============================] - 3s 3ms/step - loss: 0.0312 - root_mean_squared_error: 0.1766 - val_loss: 0.0432 - val_root_mean_squared_error: 0.2077\n",
      "Epoch 7/100\n",
      "891/891 [==============================] - 3s 3ms/step - loss: 0.0309 - root_mean_squared_error: 0.1758 - val_loss: 0.0398 - val_root_mean_squared_error: 0.1996\n",
      "Epoch 8/100\n",
      "891/891 [==============================] - 3s 3ms/step - loss: 0.0308 - root_mean_squared_error: 0.1754 - val_loss: 0.0344 - val_root_mean_squared_error: 0.1855\n",
      "Epoch 9/100\n",
      "891/891 [==============================] - 3s 4ms/step - loss: 0.0306 - root_mean_squared_error: 0.1750 - val_loss: 0.0362 - val_root_mean_squared_error: 0.1902ot_mean_squared_error: 0.175\n",
      "Epoch 10/100\n",
      "891/891 [==============================] - 3s 4ms/step - loss: 0.0305 - root_mean_squared_error: 0.1747 - val_loss: 0.0327 - val_root_mean_squared_error: 0.1808\n",
      "Epoch 11/100\n",
      "891/891 [==============================] - 4s 4ms/step - loss: 0.0303 - root_mean_squared_error: 0.1741 - val_loss: 0.0399 - val_root_mean_squared_error: 0.1998\n",
      "Epoch 12/100\n",
      "891/891 [==============================] - 4s 4ms/step - loss: 0.0303 - root_mean_squared_error: 0.1742 - val_loss: 0.0331 - val_root_mean_squared_error: 0.1821\n",
      "Epoch 13/100\n",
      "891/891 [==============================] - 3s 4ms/step - loss: 0.0301 - root_mean_squared_error: 0.1736 - val_loss: 0.0385 - val_root_mean_squared_error: 0.1961\n",
      "Epoch 14/100\n",
      "409/891 [============>.................] - ETA: 1s - loss: 0.0305 - root_mean_squared_error: 0.1746"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-24-0424062c5330>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      9\u001b[0m \u001b[0moptimizer\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mkeras\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0moptimizers\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mAdam\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlearning_rate\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0.001\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     10\u001b[0m \u001b[0mcombined_model\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcompile\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'RMSprop'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0moptimizer\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;34m'mse'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mmetrics\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mRootMeanSquaredError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 11\u001b[1;33m \u001b[0mcombined_model\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mX_train_D\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mX_train\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mY_train\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m64\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mepochs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m100\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mshuffle\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mvalidation_data\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mX_test_D\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mX_test\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mY_test\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\keras\\utils\\traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m     62\u001b[0m     \u001b[0mfiltered_tb\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     63\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 64\u001b[1;33m       \u001b[1;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     65\u001b[0m     \u001b[1;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m  \u001b[1;31m# pylint: disable=broad-except\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     66\u001b[0m       \u001b[0mfiltered_tb\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\keras\\engine\\training.py\u001b[0m in \u001b[0;36mfit\u001b[1;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001b[0m\n\u001b[0;32m   1214\u001b[0m                 _r=1):\n\u001b[0;32m   1215\u001b[0m               \u001b[0mcallbacks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mon_train_batch_begin\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstep\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1216\u001b[1;33m               \u001b[0mtmp_logs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain_function\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0miterator\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1217\u001b[0m               \u001b[1;32mif\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshould_sync\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1218\u001b[0m                 \u001b[0mcontext\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0masync_wait\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\tensorflow\\python\\util\\traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m    148\u001b[0m     \u001b[0mfiltered_tb\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    149\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 150\u001b[1;33m       \u001b[1;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    151\u001b[0m     \u001b[1;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    152\u001b[0m       \u001b[0mfiltered_tb\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\tensorflow\\python\\eager\\def_function.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *args, **kwds)\u001b[0m\n\u001b[0;32m    908\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    909\u001b[0m       \u001b[1;32mwith\u001b[0m \u001b[0mOptionalXlaContext\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_jit_compile\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 910\u001b[1;33m         \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    911\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    912\u001b[0m       \u001b[0mnew_tracing_count\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mexperimental_get_tracing_count\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\tensorflow\\python\\eager\\def_function.py\u001b[0m in \u001b[0;36m_call\u001b[1;34m(self, *args, **kwds)\u001b[0m\n\u001b[0;32m    940\u001b[0m       \u001b[1;31m# In this case we have created variables on the first call, so we run the\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    941\u001b[0m       \u001b[1;31m# defunned version which is guaranteed to never create variables.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 942\u001b[1;33m       \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_stateless_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[1;33m)\u001b[0m  \u001b[1;31m# pylint: disable=not-callable\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    943\u001b[0m     \u001b[1;32melif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_stateful_fn\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    944\u001b[0m       \u001b[1;31m# Release the lock early so that multiple threads can perform the call\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\tensorflow\\python\\eager\\function.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   3128\u001b[0m       (graph_function,\n\u001b[0;32m   3129\u001b[0m        filtered_flat_args) = self._maybe_define_function(args, kwargs)\n\u001b[1;32m-> 3130\u001b[1;33m     return graph_function._call_flat(\n\u001b[0m\u001b[0;32m   3131\u001b[0m         filtered_flat_args, captured_inputs=graph_function.captured_inputs)  # pylint: disable=protected-access\n\u001b[0;32m   3132\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\tensorflow\\python\\eager\\function.py\u001b[0m in \u001b[0;36m_call_flat\u001b[1;34m(self, args, captured_inputs, cancellation_manager)\u001b[0m\n\u001b[0;32m   1957\u001b[0m         and executing_eagerly):\n\u001b[0;32m   1958\u001b[0m       \u001b[1;31m# No tape is watching; skip to running the function.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1959\u001b[1;33m       return self._build_call_outputs(self._inference_function.call(\n\u001b[0m\u001b[0;32m   1960\u001b[0m           ctx, args, cancellation_manager=cancellation_manager))\n\u001b[0;32m   1961\u001b[0m     forward_backward = self._select_forward_and_backward_functions(\n",
      "\u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\tensorflow\\python\\eager\\function.py\u001b[0m in \u001b[0;36mcall\u001b[1;34m(self, ctx, args, cancellation_manager)\u001b[0m\n\u001b[0;32m    596\u001b[0m       \u001b[1;32mwith\u001b[0m \u001b[0m_InterpolateFunctionError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    597\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0mcancellation_manager\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 598\u001b[1;33m           outputs = execute.execute(\n\u001b[0m\u001b[0;32m    599\u001b[0m               \u001b[0mstr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msignature\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    600\u001b[0m               \u001b[0mnum_outputs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_num_outputs\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\tensorflow\\python\\eager\\execute.py\u001b[0m in \u001b[0;36mquick_execute\u001b[1;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[0;32m     56\u001b[0m   \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     57\u001b[0m     \u001b[0mctx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mensure_initialized\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 58\u001b[1;33m     tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\n\u001b[0m\u001b[0;32m     59\u001b[0m                                         inputs, attrs, num_outputs)\n\u001b[0;32m     60\u001b[0m   \u001b[1;32mexcept\u001b[0m \u001b[0mcore\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_NotOkStatusException\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "linear_model = LinearModel(units=Y_train.shape[1],kernel_initializer='he_uniform')\n",
    "# dnn_model = Sequential([Dense(60,'relu',kernel_initializer='he_uniform'),\n",
    "#                         #BatchNormalization(),\n",
    "#                         Dense(60,'relu',kernel_initializer='he_uniform'),\n",
    "#                         #BatchNormalization(),\n",
    "#                         Dense(units=Y_train.shape[1],activation='sigmoid')])\n",
    "\n",
    "combined_model = WideDeepModel(linear_model, dnn_model)\n",
    "optimizer = tensorflow.keras.optimizers.Adam(learning_rate=0.001)\n",
    "combined_model.compile(['RMSprop',optimizer],'mse',metrics=[RootMeanSquaredError()])\n",
    "combined_model.fit([X_train_D,X_train], Y_train, 64,epochs=100,shuffle=True,validation_data=([X_test_D,X_test], Y_test))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.6111491065175603"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Y_pred = combined_model.predict([X_test,X_test_D])\n",
    "u = np.sum(np.square(Y_test-Y_pred))\n",
    "v = np.sum(np.square(Y_test - np.mean(Y_test)))\n",
    "R = 1 - u/v\n",
    "R"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[-2.1592453e+00,  8.1182891e-01,  6.3967603e-01, ...,\n",
       "        -7.4185652e-01,  1.4074724e-02, -6.6616958e-01],\n",
       "       [-1.1519372e+00,  5.3086495e-01,  3.2313559e-02, ...,\n",
       "        -9.4839722e-01,  7.8628308e-01, -1.2140973e+00],\n",
       "       [-1.1991040e+00,  2.6105663e-01,  2.1478173e-03, ...,\n",
       "        -1.3801205e-01,  2.2189210e-01, -5.3747964e-01],\n",
       "       ...,\n",
       "       [-1.7238916e+00,  5.8732909e-01,  3.0360186e-01, ...,\n",
       "        -4.7154999e-01,  6.8605661e-01, -1.2222743e+00],\n",
       "       [-1.0571992e+00,  6.1388552e-01,  2.4979427e-01, ...,\n",
       "        -1.2374196e+00,  7.8614283e-01, -9.1210401e-01],\n",
       "       [-1.0953431e+00,  5.7810273e-02,  2.3952276e-01, ...,\n",
       "        -6.6699106e-01,  4.6150452e-01, -1.4515446e+00]], dtype=float32)"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_train_D"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "data = np.concatenate((X_train_D,Y_train),axis=1)\n",
    "dataT = np.concatenate((X_test_D,Y_test),axis=1)\n",
    "data = np.concatenate((data,dataT),axis=0)\n",
    "data = pd.DataFrame(data)\n",
    "data.to_csv('AutoDecoData.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(60000, 37)"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "data.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "ec83997b836a02912a2512ee199511c060294bd196620db161be9b0d7a4ae52a"
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
