import numpy as np
import matplotlib.pyplot as plt


# Each row is one sample: [feature_0, feature_1, class_label].
data = [[34.6, 78.0, 0],
        [30.3, 43.9, 0],
        [35.8, 72.9, 0],
        [60.2, 86.3, 1],
        [79.0, 75.3, 1],
        [45.1, 56.3, 0],
        [61.1, 96.5, 1],
        [75.0, 46.6, 1],
        [76.1, 87.4, 1],
        [84.4, 43.5, 1],
        [95.9, 38.2, 0],
        [75.0, 30.6, 0],
        [82.3, 76.5, 1],
        [69.4, 97.7, 1],
        [39.5, 76.0, 0],
        [54.0, 89.2, 1],
        [69.1, 52.7, 1],
        [67.9, 46.7, 0]]

# Split the samples by their class label (last column of each row).
x0 = [row for row in data if row[-1] == 0.]
x1 = [row for row in data if row[-1] == 1.]

# Per-class coordinate lists, used below for scatter plotting and for
# assembling the training arrays in the __main__ section.
plot_x0_0 = [row[0] for row in x0]
plot_x0_1 = [row[1] for row in x0]
plot_x1_0 = [row[0] for row in x1]
plot_x1_1 = [row[1] for row in x1]

# Scatter the two classes on the current figure: red dots for label 0,
# blue dots for label 1 (the decision boundary is drawn later in __main__).
plt.plot(plot_x0_0, plot_x0_1, 'ro', label='x_0')
plt.plot(plot_x1_0, plot_x1_1, 'bo', label='x_1')


def sigmoid(x):
    """Logistic function 1 / (1 + e^(-x)); works element-wise on arrays.

    Uses ``np.exp(-x)`` instead of the original ``np.e ** (-x)`` — the
    values are the same, but the dedicated ufunc is the idiomatic (and
    faster) spelling.
    """
    return 1.0 / (1.0 + np.exp(-x))


def sigmoid_derivative(x):
    """Derivative of the logistic function: s(x) * (1 - s(x)).

    Evaluates ``sigmoid`` once and reuses the result (the original
    called it twice per invocation).
    """
    s = sigmoid(x)
    return s * (1 - s)


class NeuralNetwork:
    """Single-neuron logistic "network": output = sigmoid(x @ w + b).

    Trained by gradient descent on the mean squared error between the
    sigmoid output and the 0/1 targets.
    """

    def __init__(self, x, y):
        # x: (n_samples, 2) inputs; y: (n_samples, 1) 0/1 targets —
        # assumed shapes, TODO confirm against the caller.
        self.input    = x
        # Hand-picked starting weights/bias near a plausible boundary.
        self.weights1 = np.array([[0.16], [0.16]])
        self.bias = -20.
        self.y        = y
        self.output   = np.zeros(self.y.shape)

    def feedforward(self, input=None):
        """Compute sigmoid(x @ w + b); optionally replace the stored input.

        Stores the pre-activation in ``self.z`` for use by ``backprop``.
        """
        if input is not None:
            self.input = input
        self.z = np.dot(self.input, self.weights1) + self.bias
        self.output = sigmoid(self.z)
        return self.output

    def backprop(self, lr=0.0001):
        """One gradient-descent step on the mean squared error.

        Fix: the weight gradient was previously *summed* over the batch
        while the bias gradient was *averaged* (np.mean), so the two
        parameters trained at scales differing by the batch size. Both
        gradients are now averaged over the batch.
        """
        n = len(self.input)
        # dL/dz for L = mean((y - output)^2) with z = x @ w + b,
        # already divided by the batch size.
        dz = -2 * (self.y - self.output) * sigmoid_derivative(self.z) / n
        d_weights1 = np.dot(self.input.T, dz)   # (2, 1), batch-averaged
        d_bias = float(np.sum(dz))              # scalar, batch-averaged
        self.weights1 -= d_weights1 * lr
        self.bias -= d_bias * lr


if __name__ == '__main__':
    # Reassemble the per-class coordinate lists into an (n, 2) design
    # matrix: class-0 samples first, then class-1.
    feat_a = np.concatenate((np.array(plot_x0_0), np.array(plot_x1_0)), axis=0)
    feat_b = np.concatenate((np.array(plot_x0_1), np.array(plot_x1_1)), axis=0)
    x_data = np.stack((feat_a, feat_b), axis=1)
    # Matching labels as an (n, 1) column vector of 0s then 1s.
    labels = np.concatenate((np.zeros(len(plot_x0_0)), np.ones(len(plot_x1_0))), axis=0)
    y_data = np.expand_dims(labels, axis=1)

    net = NeuralNetwork(x_data, y_data)
    for epoch in range(1000):
        prediction = net.feedforward()
        net.backprop()
        # The reported loss uses the pre-update prediction for this epoch.
        loss = np.mean((y_data - prediction) ** 2)
        print("loss is {}".format(loss))

    # Decision boundary where sigmoid(z) = 0.5, i.e. z = 0:
    #   w0*x + w1*y + b = 0  =>  y = (-w0*x - b) / w1
    X = np.array(list(range(30, 100, 1)), dtype=np.float64)
    Y = (-net.weights1[0] * X - net.bias) / net.weights1[1]
    plt.plot(X, Y)

    plt.show()
