from numpy import *
import pandas as pd

# %%
N = 2000  # size of dataset
# Draw N points uniformly from [-1, 1]^2 and label them against the curve
# y = 0.8*sin(pi*x): label 1 when the point lies on or below the curve,
# 0 otherwise.  Vectorized: the original looped row by row with .iloc
# (O(N) DataFrame writes), carried an unused `index` counter, and used a
# redundant exhaustive elif where a plain else sufficed.
x1 = random.rand(N) * 2 - 1
x2 = random.rand(N) * 2 - 1
labels = (sin(pi * x1) * 0.8 - x2 >= 0).astype(float)
Data = pd.DataFrame({'x1': x1, 'x2': x2, 'y': labels})


# %%
class Layer:
    """A fully connected neural-network layer.

    Holds the weight matrix, bias vector, and the error term (delta)
    that the owning network fills in during backpropagation.
    """

    def __init__(self, input_num, neuron_num, activation='sigmoid'):
        """Create a layer mapping `input_num` inputs to `neuron_num` outputs.

        Weights ~ N(0, 1/neuron_num), biases are small positive uniforms.
        Only 'sigmoid' is currently a supported activation name.
        """
        self.neurons = neuron_num
        self.activation_func = activation
        # NOTE(review): scales by sqrt(1/neuron_num) (fan-out); Xavier init
        # conventionally scales by fan-in (input_num) — kept as in original.
        self.weights = random.randn(input_num, neuron_num) * sqrt(1 / neuron_num)
        self.bias = random.rand(neuron_num) * 0.1
        self.delta = None  # set by Neural_Network.backpropagation

    def forward(self, input_data):
        """Return activation(W.T @ x + b) for the 1-D input vector."""
        return self.activation(self.weights.T @ input_data + self.bias)

    def activation(self, x):
        """Apply the configured activation element-wise.

        Raises ValueError for an unsupported activation name (previously
        this silently returned None, causing a confusing TypeError later).
        """
        if self.activation_func == 'sigmoid':
            return 1 / (1 + exp(-x))
        raise ValueError('unsupported activation: %r' % self.activation_func)

    def activation_derivative(self, x):
        """Derivative of the activation, expressed in terms of its OUTPUT x.

        For sigmoid, sigma'(z) = sigma(z) * (1 - sigma(z)) = x * (1 - x).
        """
        if self.activation_func == 'sigmoid':
            return x * (1 - x)
        raise ValueError('unsupported activation: %r' % self.activation_func)


class Neural_Network:
    """A feed-forward network trained by per-sample stochastic gradient descent.

    Layers are appended with add_layer; training minimizes an MSE loss.
    The final layer is assumed to have a single neuron (see mse_loss_func
    and evaluation, which unwrap a length-1 output).
    """

    def __init__(self):
        self.layer_list = []    # Layer objects in forward order
        self.architecture = []  # neuron count per layer, for inspection

    def add_layer(self, layer):
        """Append `layer` to the network and record its width."""
        self.layer_list.append(layer)
        self.architecture.append(layer.neurons)

    def network_output(self, input_data):
        """Forward pass: return a list of every layer's output vector.

        The last element is the network's final output.
        """
        X = input_data
        Output_of_each_layer = []
        for layer in self.layer_list:
            X = layer.forward(X)
            Output_of_each_layer.append(X)
        return Output_of_each_layer

    def read_dataset(self, X, y, dividing_ratio):
        """Split (X, y) into train/validation sets at `dividing_ratio`.

        No shuffling: the first `dividing_ratio` fraction of rows becomes
        the training set, the remainder the validation set.
        """
        self.data_size = X.shape[0]
        self.train_size = int(self.data_size * dividing_ratio)
        self.train_X = X[:self.train_size, :]
        self.train_y = y[:self.train_size]
        self.val_X = X[self.train_size:, :]
        self.val_y = y[self.train_size:]

    def backpropagation(self, X, y, learning_rate):
        """One SGD step on a single sample (X, y).

        Computes layer deltas from the output layer backwards, applying
        the weight/bias updates in place as it goes.
        """
        Hidden_layer_num = len(self.layer_list)
        Output = self.network_output(X)
        Output.insert(0, X)  # Output[i] is now the INPUT to layer i
        for i in reversed(range(Hidden_layer_num)):
            layer = self.layer_list[i]
            # == on Layer is identity comparison (no __eq__): true only for
            # the output layer itself.
            if layer == self.layer_list[-1]:
                # Output layer: delta = (prediction - target) * sigma'(output)
                layer.delta = (Output[-1] - y) * layer.activation_derivative(Output[-1])
            else:
                # Hidden layer: propagate the downstream delta backwards.
                # NOTE(review): next_layer.weights were already updated in the
                # previous loop iteration, so this uses the post-update weights
                # rather than those used in the forward pass — confirm intended.
                next_layer = self.layer_list[i + 1]
                layer.delta = layer.activation_derivative(Output[i + 1]) * (next_layer.weights @ next_layer.delta)
            layer.weights -= learning_rate * (Output[i].reshape(-1, 1) @ layer.delta.reshape(1, -1))
            layer.bias -= learning_rate * layer.delta

    def mse_loss_func(self, X, y):
        """Mean squared error of the network's output over the rows of X."""
        MSE = 0
        Input_data_size = X.shape[0]
        for i in range(Input_data_size):
            MSE += (self.network_output(X[i, :])[-1] - y[i]) ** 2
        # MSE accumulates as a length-1 array (single output neuron);
        # [0] unwraps the scalar before averaging.
        return MSE[0] / Input_data_size

    def training(self, epochs, learning_rate, batch_size):
        """Train for `epochs` epochs.

        Each epoch runs per-sample SGD over `batch_size` training points
        sampled without replacement.  Loss, accuracy, and decision-boundary
        snapshots are recorded before training and every 10 epochs after.
        """
        self.train_loss = [self.mse_loss_func(self.train_X, self.train_y)]
        self.val_loss = [self.mse_loss_func(self.val_X, self.val_y)]
        self.accuracy = [self.accuracy_func(self.val_X, self.val_y)]
        self.boundary = [self.boundary_func()]
        for epoch in range(epochs):
            Batch_Index = random.choice(self.train_size, batch_size, replace=False)
            for i in Batch_Index:
                self.backpropagation(self.train_X[i, :], self.train_y[i], learning_rate)
            if epoch % 10 == 0:
                t_loss = self.mse_loss_func(self.train_X, self.train_y)
                v_loss = self.mse_loss_func(self.val_X, self.val_y)
                accur = self.accuracy_func(self.val_X, self.val_y)
                self.train_loss.append(t_loss)
                self.val_loss.append(v_loss)
                self.accuracy.append(accur)
                self.boundary.append(self.boundary_func())
                print('Epochs: %d, train loss: %f, val loss: %f, accuracy: %.3f' % (
                    epoch, t_loss, v_loss, accur))

    def evaluation(self, X, threshold=0.9):
        """Classify one point: 1 if the network output exceeds `threshold`, else 0."""
        if self.network_output(X)[-1] > threshold:
            return 1
        else:
            return 0

    def accuracy_func(self, val_X, val_y, threshold=0.9):
        """Return the percentage of rows of val_X classified to match val_y."""
        val_data_num = val_X.shape[0]
        error_num = 0
        for i in range(val_data_num):
            if self.evaluation(val_X[i, :], threshold) != val_y[i]:
                error_num += 1
        return (1 - error_num / val_data_num) * 100

    def boundary_func(self):
        """Trace the decision boundary on a 100x100 grid over the training
        data's bounding box.

        Returns [xs, ys]: coordinates of grid cells where the predicted
        class flips between vertically adjacent grid points.
        """
        x1_range = linspace(self.train_X[:, 0].min(), self.train_X[:, 0].max(), 100)
        x2_range = linspace(self.train_X[:, 1].min(), self.train_X[:, 1].max(), 100)
        bound_x1 = [];
        bound_x2 = []
        for i in range(len(x1_range)):
            for j in range(len(x2_range) - 1):
                y0 = self.evaluation(array([x1_range[i], x2_range[j]]))
                y1 = self.evaluation(array([x1_range[i], x2_range[j + 1]]))
                if y0 != y1:
                    bound_x1.append(x1_range[i])
                    bound_x2.append(x2_range[j])
        return [bound_x1, bound_x2]


# %% Classification
# Build a 2 -> 8 -> 16 -> 8 -> 1 sigmoid network and train it on 80% of
# the generated dataset.
X = Data[['x1', 'x2']].values
y = Data['y'].values
DNN = Neural_Network()
DNN.add_layer(Layer(2, 8, 'sigmoid'))
DNN.add_layer(Layer(8, 16, 'sigmoid'))
DNN.add_layer(Layer(16, 8, 'sigmoid'))
DNN.add_layer(Layer(8, 1, 'sigmoid'))
# Was a bare no-op expression (notebook leftover); print makes the intent
# explicit when run as a script.
print('architecture:', DNN.architecture)
DNN.read_dataset(X, y, dividing_ratio=0.8)
DNN.training(2000, 0.005, 1600)

"""
这里主要要定义两个类：层（Layer）和网络（Neural_Network），
其中层中包含层的输入（input_num）、输出（neuron_num）、激活函数（activation_func）、权重（weights）、偏置（bias）以及梯度下降关键参数（delta）；
然后网络持有各层对象，使用 add_layer 函数添加层数，network_output 函数计算各层输出，read_dataset 函数读取数据集，mse_loss_func 函数计算MSE误差，backpropagation 函数完成误差向后传播，并最终使用 training 函数完成整个训练过程。
此外，还定义了evaluation函数依据分类阈值判断分类结果，accuracy_func 函数用来计算分类准确率，boundary_func 函数给出分类边界。

我们将全部数据集的80%作为训练集，剩下20%则作为验证集，
使用 Sigmoid 激活函数和 MSE 损失函数，定义了一个具有 4 层隐藏层的神经网络（各层神经元分别为8/16/8/1），
以 0.9 为分类阈值（网络计算结果>0.9则标记为1，反之为0），以学习率 0.005 满 batch （每次训练选取的数据点个数）训练 2000 个 epoch 之后网络的分类准确率已经稳定到 95% 以上
"""