{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from numpy import *\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [],
   "source": [
    "N = 2000  # size of dataset\n",
    "Data = pd.DataFrame(zeros((N, 3)), columns=['x1', 'x2', 'y'])\n",
    "index = 0\n",
    "for i in range(N):\n",
    "    [x, y] = random.rand(2) * 2 - 1\n",
    "    if sin(pi * x) * 0.8 - y >= 0:\n",
    "        Data.iloc[i, :] = [x, y, 1]\n",
    "    elif sin(pi * x) * 0.8 - y < 0:\n",
    "        Data.iloc[i, :] = [x, y, 0]\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
    "class Layer:\n",
    "    def __init__(self, input_num, neuron_num, activation='sigmoid'):\n",
    "        self.neurons = neuron_num\n",
    "        self.activation_func = activation\n",
    "        self.weights = random.randn(input_num, neuron_num) * sqrt(1 / neuron_num)\n",
    "        self.bias = random.rand(neuron_num) * 0.1\n",
    "        self.delta = None\n",
    "\n",
    "    def forward(self, input_data):\n",
    "        OUTPUT = self.activation(self.weights.T @ input_data + self.bias)\n",
    "        return OUTPUT\n",
    "\n",
    "    def activation(self, x):\n",
    "        if self.activation_func == 'sigmoid':\n",
    "            return 1 / (1 + exp(-x))\n",
    "\n",
    "    def activation_derivative(self, x):\n",
    "        if self.activation_func == 'sigmoid':\n",
    "            return x * (1 - x)\n",
    "\n",
    "\n",
    "class Neural_Network:\n",
    "    def __init__(self):\n",
    "        self.layer_list = []\n",
    "        self.architecture = []\n",
    "\n",
    "    def add_layer(self, layer):\n",
    "        self.layer_list.append(layer)\n",
    "        self.architecture.append(layer.neurons)\n",
    "\n",
    "    def network_output(self, input_data):\n",
    "        X = input_data\n",
    "        Output_of_each_layer = []\n",
    "        for layer in self.layer_list:\n",
    "            X = layer.forward(X)\n",
    "            Output_of_each_layer.append(X)\n",
    "        return Output_of_each_layer\n",
    "\n",
    "    def read_dataset(self, X, y, dividing_ratio):\n",
    "        self.data_size = X.shape[0]\n",
    "        self.train_size = int(self.data_size * dividing_ratio)\n",
    "        self.train_X = X[:self.train_size, :]\n",
    "        self.train_y = y[:self.train_size]\n",
    "        self.val_X = X[self.train_size:, :]\n",
    "        self.val_y = y[self.train_size:]\n",
    "\n",
    "    def backpropagation(self, X, y, learning_rate):\n",
    "        Hidden_layer_num = len(self.layer_list)\n",
    "        Output = self.network_output(X)\n",
    "        Output.insert(0, X)\n",
    "        for i in reversed(range(Hidden_layer_num)):\n",
    "            layer = self.layer_list[i]\n",
    "            if layer == self.layer_list[-1]:\n",
    "                layer.delta = (Output[-1] - y) * layer.activation_derivative(Output[-1])\n",
    "            else:\n",
    "                next_layer = self.layer_list[i + 1]\n",
    "                layer.delta = layer.activation_derivative(Output[i + 1]) * (next_layer.weights @ next_layer.delta)\n",
    "            layer.weights -= learning_rate * (Output[i].reshape(-1, 1) @ layer.delta.reshape(1, -1))\n",
    "            layer.bias -= learning_rate * layer.delta\n",
    "\n",
    "    def mse_loss_func(self, X, y):\n",
    "        MSE = 0\n",
    "        Input_data_size = X.shape[0]\n",
    "        for i in range(Input_data_size):\n",
    "            MSE += (self.network_output(X[i, :])[-1] - y[i]) ** 2\n",
    "        return MSE[0] / Input_data_size\n",
    "\n",
    "    def training(self, epochs, learning_rate, batch_size):\n",
    "        self.train_loss = [self.mse_loss_func(self.train_X, self.train_y)]\n",
    "        self.val_loss = [self.mse_loss_func(self.val_X, self.val_y)]\n",
    "        self.accuracy = [self.accuracy_func(self.val_X, self.val_y)]\n",
    "        self.boundary = [self.boundary_func()]\n",
    "        for epoch in range(epochs):\n",
    "            Batch_Index = random.choice(self.train_size, batch_size, replace=False)\n",
    "            for i in Batch_Index:\n",
    "                self.backpropagation(self.train_X[i, :], self.train_y[i], learning_rate)\n",
    "            if epoch % 10 == 0:\n",
    "                t_loss = self.mse_loss_func(self.train_X, self.train_y)\n",
    "                v_loss = self.mse_loss_func(self.val_X, self.val_y)\n",
    "                accur = self.accuracy_func(self.val_X, self.val_y)\n",
    "                self.train_loss.append(t_loss)\n",
    "                self.val_loss.append(v_loss)\n",
    "                self.accuracy.append(accur)\n",
    "                self.boundary.append(self.boundary_func())\n",
    "                print('Epochs: %d, train loss: %f, val loss: %f, accuracy: %.3f' % (\n",
    "                    epoch, t_loss, v_loss, accur))\n",
    "\n",
    "    def evaluation(self, X, threshold=0.9):\n",
    "        if self.network_output(X)[-1] > threshold:\n",
    "            return 1\n",
    "        else:\n",
    "            return 0\n",
    "\n",
    "    def accuracy_func(self, val_X, val_y, threshold=0.9):\n",
    "        val_data_num = val_X.shape[0]\n",
    "        error_num = 0\n",
    "        for i in range(val_data_num):\n",
    "            if self.evaluation(val_X[i, :], threshold) != val_y[i]:\n",
    "                error_num += 1\n",
    "        return (1 - error_num / val_data_num) * 100\n",
    "\n",
    "    def boundary_func(self):\n",
    "        x1_range = linspace(self.train_X[:, 0].min(), self.train_X[:, 0].max(), 100)\n",
    "        x2_range = linspace(self.train_X[:, 1].min(), self.train_X[:, 1].max(), 100)\n",
    "        bound_x1 = [];\n",
    "        bound_x2 = []\n",
    "        for i in range(len(x1_range)):\n",
    "            for j in range(len(x2_range) - 1):\n",
    "                y0 = self.evaluation(array([x1_range[i], x2_range[j]]))\n",
    "                y1 = self.evaluation(array([x1_range[i], x2_range[j + 1]]))\n",
    "                if y0 != y1:\n",
    "                    bound_x1.append(x1_range[i])\n",
    "                    bound_x2.append(x2_range[j])\n",
    "        return [bound_x1, bound_x2]\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epochs: 0, train loss: 0.257102, val loss: 0.259451, accuracy: 51.750\n",
      "Epochs: 10, train loss: 0.249857, val loss: 0.249861, accuracy: 51.750\n",
      "Epochs: 20, train loss: 0.249800, val loss: 0.249705, accuracy: 51.750\n",
      "Epochs: 30, train loss: 0.249752, val loss: 0.249609, accuracy: 51.750\n",
      "Epochs: 40, train loss: 0.249691, val loss: 0.249601, accuracy: 51.750\n",
      "Epochs: 50, train loss: 0.249639, val loss: 0.249445, accuracy: 51.750\n",
      "Epochs: 60, train loss: 0.249553, val loss: 0.249536, accuracy: 51.750\n",
      "Epochs: 70, train loss: 0.249467, val loss: 0.249482, accuracy: 51.750\n",
      "Epochs: 80, train loss: 0.249356, val loss: 0.249376, accuracy: 51.750\n",
      "Epochs: 90, train loss: 0.249215, val loss: 0.249033, accuracy: 51.750\n",
      "Epochs: 100, train loss: 0.249029, val loss: 0.248894, accuracy: 51.750\n",
      "Epochs: 110, train loss: 0.248825, val loss: 0.248592, accuracy: 51.750\n",
      "Epochs: 120, train loss: 0.248508, val loss: 0.248317, accuracy: 51.750\n",
      "Epochs: 130, train loss: 0.248084, val loss: 0.248069, accuracy: 51.750\n",
      "Epochs: 140, train loss: 0.247518, val loss: 0.247312, accuracy: 51.750\n",
      "Epochs: 150, train loss: 0.246696, val loss: 0.246804, accuracy: 51.750\n",
      "Epochs: 160, train loss: 0.245360, val loss: 0.245311, accuracy: 51.750\n",
      "Epochs: 170, train loss: 0.243283, val loss: 0.243070, accuracy: 51.750\n",
      "Epochs: 180, train loss: 0.239613, val loss: 0.239533, accuracy: 51.750\n",
      "Epochs: 190, train loss: 0.232603, val loss: 0.232433, accuracy: 51.750\n",
      "Epochs: 200, train loss: 0.217736, val loss: 0.217292, accuracy: 51.750\n",
      "Epochs: 210, train loss: 0.186655, val loss: 0.185557, accuracy: 51.750\n",
      "Epochs: 220, train loss: 0.145265, val loss: 0.142345, accuracy: 51.750\n",
      "Epochs: 230, train loss: 0.120547, val loss: 0.116502, accuracy: 55.500\n",
      "Epochs: 240, train loss: 0.110847, val loss: 0.106519, accuracy: 66.250\n",
      "Epochs: 250, train loss: 0.106973, val loss: 0.102531, accuracy: 71.750\n",
      "Epochs: 260, train loss: 0.105254, val loss: 0.100315, accuracy: 73.000\n",
      "Epochs: 270, train loss: 0.104543, val loss: 0.099405, accuracy: 74.500\n",
      "Epochs: 280, train loss: 0.104039, val loss: 0.099381, accuracy: 76.000\n",
      "Epochs: 290, train loss: 0.104075, val loss: 0.098764, accuracy: 75.750\n",
      "Epochs: 300, train loss: 0.103798, val loss: 0.098770, accuracy: 76.250\n",
      "Epochs: 310, train loss: 0.103617, val loss: 0.099190, accuracy: 77.500\n",
      "Epochs: 320, train loss: 0.103571, val loss: 0.098855, accuracy: 77.500\n",
      "Epochs: 330, train loss: 0.103676, val loss: 0.098586, accuracy: 76.750\n",
      "Epochs: 340, train loss: 0.103494, val loss: 0.099195, accuracy: 78.500\n",
      "Epochs: 350, train loss: 0.103435, val loss: 0.099003, accuracy: 78.500\n",
      "Epochs: 360, train loss: 0.103418, val loss: 0.099145, accuracy: 78.500\n",
      "Epochs: 370, train loss: 0.103907, val loss: 0.098360, accuracy: 76.500\n",
      "Epochs: 380, train loss: 0.103327, val loss: 0.099074, accuracy: 78.500\n",
      "Epochs: 390, train loss: 0.103250, val loss: 0.098548, accuracy: 78.500\n",
      "Epochs: 400, train loss: 0.103161, val loss: 0.098732, accuracy: 78.500\n",
      "Epochs: 410, train loss: 0.103133, val loss: 0.098970, accuracy: 78.500\n",
      "Epochs: 420, train loss: 0.103166, val loss: 0.099279, accuracy: 78.500\n",
      "Epochs: 430, train loss: 0.102863, val loss: 0.098616, accuracy: 78.500\n",
      "Epochs: 440, train loss: 0.102656, val loss: 0.098017, accuracy: 78.500\n",
      "Epochs: 450, train loss: 0.102442, val loss: 0.097798, accuracy: 78.500\n",
      "Epochs: 460, train loss: 0.102186, val loss: 0.097844, accuracy: 78.500\n",
      "Epochs: 470, train loss: 0.101827, val loss: 0.097333, accuracy: 78.500\n",
      "Epochs: 480, train loss: 0.101396, val loss: 0.096812, accuracy: 78.500\n",
      "Epochs: 490, train loss: 0.100910, val loss: 0.096043, accuracy: 78.250\n",
      "Epochs: 500, train loss: 0.100197, val loss: 0.095764, accuracy: 78.500\n",
      "Epochs: 510, train loss: 0.099381, val loss: 0.094990, accuracy: 78.500\n",
      "Epochs: 520, train loss: 0.098428, val loss: 0.093593, accuracy: 78.000\n",
      "Epochs: 530, train loss: 0.097201, val loss: 0.092768, accuracy: 78.500\n",
      "Epochs: 540, train loss: 0.096148, val loss: 0.090853, accuracy: 77.750\n",
      "Epochs: 550, train loss: 0.094258, val loss: 0.089519, accuracy: 78.250\n",
      "Epochs: 560, train loss: 0.092431, val loss: 0.087982, accuracy: 78.500\n",
      "Epochs: 570, train loss: 0.090455, val loss: 0.086092, accuracy: 79.250\n",
      "Epochs: 580, train loss: 0.088330, val loss: 0.083608, accuracy: 79.250\n",
      "Epochs: 590, train loss: 0.086132, val loss: 0.081161, accuracy: 78.750\n",
      "Epochs: 600, train loss: 0.083668, val loss: 0.079342, accuracy: 79.500\n",
      "Epochs: 610, train loss: 0.081221, val loss: 0.076420, accuracy: 78.500\n",
      "Epochs: 620, train loss: 0.078840, val loss: 0.074645, accuracy: 79.500\n",
      "Epochs: 630, train loss: 0.076665, val loss: 0.072811, accuracy: 79.750\n",
      "Epochs: 640, train loss: 0.074035, val loss: 0.068801, accuracy: 78.000\n",
      "Epochs: 650, train loss: 0.071735, val loss: 0.066453, accuracy: 78.250\n",
      "Epochs: 660, train loss: 0.069761, val loss: 0.065864, accuracy: 79.500\n",
      "Epochs: 670, train loss: 0.067398, val loss: 0.063084, accuracy: 79.750\n",
      "Epochs: 680, train loss: 0.065294, val loss: 0.060440, accuracy: 79.500\n",
      "Epochs: 690, train loss: 0.063513, val loss: 0.059128, accuracy: 80.000\n",
      "Epochs: 700, train loss: 0.061975, val loss: 0.058024, accuracy: 81.250\n",
      "Epochs: 710, train loss: 0.060123, val loss: 0.055031, accuracy: 80.000\n",
      "Epochs: 720, train loss: 0.059128, val loss: 0.055743, accuracy: 82.250\n",
      "Epochs: 730, train loss: 0.056984, val loss: 0.052125, accuracy: 81.250\n",
      "Epochs: 740, train loss: 0.055837, val loss: 0.050602, accuracy: 81.000\n",
      "Epochs: 750, train loss: 0.054234, val loss: 0.050527, accuracy: 82.750\n",
      "Epochs: 760, train loss: 0.052909, val loss: 0.047933, accuracy: 81.500\n",
      "Epochs: 770, train loss: 0.051251, val loss: 0.046761, accuracy: 82.500\n",
      "Epochs: 780, train loss: 0.049744, val loss: 0.045930, accuracy: 83.750\n",
      "Epochs: 790, train loss: 0.048751, val loss: 0.045904, accuracy: 86.500\n",
      "Epochs: 800, train loss: 0.046950, val loss: 0.043770, accuracy: 86.000\n",
      "Epochs: 810, train loss: 0.045439, val loss: 0.042398, accuracy: 85.750\n",
      "Epochs: 820, train loss: 0.044150, val loss: 0.040634, accuracy: 84.750\n",
      "Epochs: 830, train loss: 0.042945, val loss: 0.041099, accuracy: 88.500\n",
      "Epochs: 840, train loss: 0.041042, val loss: 0.038637, accuracy: 87.000\n",
      "Epochs: 850, train loss: 0.039586, val loss: 0.037811, accuracy: 87.750\n",
      "Epochs: 860, train loss: 0.039003, val loss: 0.038596, accuracy: 89.500\n",
      "Epochs: 870, train loss: 0.036658, val loss: 0.035488, accuracy: 88.500\n",
      "Epochs: 880, train loss: 0.035291, val loss: 0.034443, accuracy: 88.500\n",
      "Epochs: 890, train loss: 0.034013, val loss: 0.033397, accuracy: 88.500\n",
      "Epochs: 900, train loss: 0.032638, val loss: 0.032467, accuracy: 88.750\n",
      "Epochs: 910, train loss: 0.031252, val loss: 0.031706, accuracy: 89.500\n",
      "Epochs: 920, train loss: 0.030154, val loss: 0.030719, accuracy: 89.250\n",
      "Epochs: 930, train loss: 0.028795, val loss: 0.030217, accuracy: 89.750\n",
      "Epochs: 940, train loss: 0.027671, val loss: 0.029371, accuracy: 89.750\n",
      "Epochs: 950, train loss: 0.026677, val loss: 0.028381, accuracy: 89.500\n",
      "Epochs: 960, train loss: 0.025844, val loss: 0.028246, accuracy: 90.500\n",
      "Epochs: 970, train loss: 0.024903, val loss: 0.027386, accuracy: 90.250\n",
      "Epochs: 980, train loss: 0.024646, val loss: 0.026463, accuracy: 89.500\n",
      "Epochs: 990, train loss: 0.024438, val loss: 0.026203, accuracy: 89.000\n",
      "Epochs: 1000, train loss: 0.022680, val loss: 0.025572, accuracy: 90.500\n",
      "Epochs: 1010, train loss: 0.022091, val loss: 0.024448, accuracy: 90.250\n",
      "Epochs: 1020, train loss: 0.021472, val loss: 0.024420, accuracy: 91.000\n",
      "Epochs: 1030, train loss: 0.021778, val loss: 0.025469, accuracy: 91.500\n",
      "Epochs: 1040, train loss: 0.020523, val loss: 0.023830, accuracy: 91.500\n",
      "Epochs: 1050, train loss: 0.019995, val loss: 0.022728, accuracy: 91.000\n",
      "Epochs: 1060, train loss: 0.019473, val loss: 0.022138, accuracy: 91.250\n",
      "Epochs: 1070, train loss: 0.018992, val loss: 0.021934, accuracy: 91.500\n",
      "Epochs: 1080, train loss: 0.018693, val loss: 0.021140, accuracy: 91.250\n",
      "Epochs: 1090, train loss: 0.018238, val loss: 0.021257, accuracy: 91.500\n",
      "Epochs: 1100, train loss: 0.018864, val loss: 0.022739, accuracy: 91.750\n",
      "Epochs: 1110, train loss: 0.017435, val loss: 0.020380, accuracy: 91.750\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m                         Traceback (most recent call last)",
      "Input \u001B[1;32mIn [4]\u001B[0m, in \u001B[0;36m<cell line: 10>\u001B[1;34m()\u001B[0m\n\u001B[0;32m      8\u001B[0m DNN\u001B[38;5;241m.\u001B[39marchitecture\n\u001B[0;32m      9\u001B[0m DNN\u001B[38;5;241m.\u001B[39mread_dataset(X, y, dividing_ratio\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m0.8\u001B[39m)\n\u001B[1;32m---> 10\u001B[0m \u001B[43mDNN\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mtraining\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m2000\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m0.005\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m1600\u001B[39;49m\u001B[43m)\u001B[49m\n",
      "Input \u001B[1;32mIn [3]\u001B[0m, in \u001B[0;36mNeural_Network.training\u001B[1;34m(self, epochs, learning_rate, batch_size)\u001B[0m\n\u001B[0;32m     82\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mval_loss\u001B[38;5;241m.\u001B[39mappend(v_loss)\n\u001B[0;32m     83\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39maccuracy\u001B[38;5;241m.\u001B[39mappend(accur)\n\u001B[1;32m---> 84\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mboundary\u001B[38;5;241m.\u001B[39mappend(\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mboundary_func\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m)\n\u001B[0;32m     85\u001B[0m \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mEpochs: \u001B[39m\u001B[38;5;132;01m%d\u001B[39;00m\u001B[38;5;124m, train loss: \u001B[39m\u001B[38;5;132;01m%f\u001B[39;00m\u001B[38;5;124m, val loss: \u001B[39m\u001B[38;5;132;01m%f\u001B[39;00m\u001B[38;5;124m, accuracy: \u001B[39m\u001B[38;5;132;01m%.3f\u001B[39;00m\u001B[38;5;124m'\u001B[39m \u001B[38;5;241m%\u001B[39m (\n\u001B[0;32m     86\u001B[0m     epoch, t_loss, v_loss, accur))\n",
      "Input \u001B[1;32mIn [3]\u001B[0m, in \u001B[0;36mNeural_Network.boundary_func\u001B[1;34m(self)\u001B[0m\n\u001B[0;32m    107\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m i \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mrange\u001B[39m(\u001B[38;5;28mlen\u001B[39m(x1_range)):\n\u001B[0;32m    108\u001B[0m     \u001B[38;5;28;01mfor\u001B[39;00m j \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mrange\u001B[39m(\u001B[38;5;28mlen\u001B[39m(x2_range) \u001B[38;5;241m-\u001B[39m \u001B[38;5;241m1\u001B[39m):\n\u001B[1;32m--> 109\u001B[0m         y0 \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mevaluation\u001B[49m\u001B[43m(\u001B[49m\u001B[43marray\u001B[49m\u001B[43m(\u001B[49m\u001B[43m[\u001B[49m\u001B[43mx1_range\u001B[49m\u001B[43m[\u001B[49m\u001B[43mi\u001B[49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mx2_range\u001B[49m\u001B[43m[\u001B[49m\u001B[43mj\u001B[49m\u001B[43m]\u001B[49m\u001B[43m]\u001B[49m\u001B[43m)\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    110\u001B[0m         y1 \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mevaluation(array([x1_range[i], x2_range[j \u001B[38;5;241m+\u001B[39m \u001B[38;5;241m1\u001B[39m]]))\n\u001B[0;32m    111\u001B[0m         \u001B[38;5;28;01mif\u001B[39;00m y0 \u001B[38;5;241m!=\u001B[39m y1:\n",
      "Input \u001B[1;32mIn [3]\u001B[0m, in \u001B[0;36mNeural_Network.evaluation\u001B[1;34m(self, X, threshold)\u001B[0m\n\u001B[0;32m     88\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mevaluation\u001B[39m(\u001B[38;5;28mself\u001B[39m, X, threshold\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m0.9\u001B[39m):\n\u001B[1;32m---> 89\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mnetwork_output\u001B[49m\u001B[43m(\u001B[49m\u001B[43mX\u001B[49m\u001B[43m)\u001B[49m[\u001B[38;5;241m-\u001B[39m\u001B[38;5;241m1\u001B[39m] \u001B[38;5;241m>\u001B[39m threshold:\n\u001B[0;32m     90\u001B[0m         \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;241m1\u001B[39m\n\u001B[0;32m     91\u001B[0m     \u001B[38;5;28;01melse\u001B[39;00m:\n",
      "Input \u001B[1;32mIn [3]\u001B[0m, in \u001B[0;36mNeural_Network.network_output\u001B[1;34m(self, input_data)\u001B[0m\n\u001B[0;32m     33\u001B[0m Output_of_each_layer \u001B[38;5;241m=\u001B[39m []\n\u001B[0;32m     34\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m layer \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mlayer_list:\n\u001B[1;32m---> 35\u001B[0m     X \u001B[38;5;241m=\u001B[39m \u001B[43mlayer\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mforward\u001B[49m\u001B[43m(\u001B[49m\u001B[43mX\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     36\u001B[0m     Output_of_each_layer\u001B[38;5;241m.\u001B[39mappend(X)\n\u001B[0;32m     37\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m Output_of_each_layer\n",
      "Input \u001B[1;32mIn [3]\u001B[0m, in \u001B[0;36mLayer.forward\u001B[1;34m(self, input_data)\u001B[0m\n\u001B[0;32m      9\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mforward\u001B[39m(\u001B[38;5;28mself\u001B[39m, input_data):\n\u001B[1;32m---> 10\u001B[0m     OUTPUT \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mactivation(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mweights\u001B[38;5;241m.\u001B[39mT \u001B[38;5;241m@\u001B[39m input_data \u001B[38;5;241m+\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mbias\u001B[49m)\n\u001B[0;32m     11\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m OUTPUT\n",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m: "
     ]
    }
   ],
   "source": [
    "X = Data[['x1', 'x2']].values\n",
    "y = Data['y'].values\n",
    "DNN = Neural_Network()\n",
    "DNN.add_layer(Layer(2, 8, 'sigmoid'))\n",
    "DNN.add_layer(Layer(8, 16, 'sigmoid'))\n",
    "DNN.add_layer(Layer(16, 8, 'sigmoid'))\n",
    "DNN.add_layer(Layer(8, 1, 'sigmoid'))\n",
    "DNN.architecture\n",
    "DNN.read_dataset(X, y, dividing_ratio=0.8)\n",
    "DNN.training(2000, 0.005, 1600)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% Classification\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}