import numpy as np
import pylab as pl


# Neural network class: 4-layer MLP (3 -> 24 -> 12 -> 6 -> 3) with tanh
# activations after layers 2 and 4 and a softmax output, trained with
# full-batch gradient descent on the cross-entropy loss.
class NN:
    def __init__(self):
        """Initialize the four weight matrices (fixed seed for reproducibility)."""
        np.random.seed(0)

        # ==========
        # Initialize parameters: input(3) -> 24 -> 12 -> 6 -> output(3)
        # ==========
        self.w1 = np.random.randn(3, 24)
        self.w2 = np.random.randn(24, 12)
        self.w3 = np.random.randn(12, 6)
        self.w4 = np.random.randn(6, 3)

    # forward computation
    def call(self, input):
        """Forward pass.

        :param input: (batch, 3) array of samples.
        :return: (batch, 3) array of class probabilities (each row sums to 1).
        """
        self.x1 = input @ self.w1      # linear layer 1
        self.x2 = self.x1 @ self.w2    # linear layer 2
        self.x3 = np.tanh(self.x2)     # tanh activation
        self.x4 = self.x3 @ self.w3    # linear layer 3
        self.x5 = self.x4 @ self.w4    # linear layer 4
        self.x6 = np.tanh(self.x5)     # tanh activation (softmax logits)

        # Vectorized, numerically stable softmax: subtracting the row max
        # leaves the result unchanged but prevents overflow in exp.
        shifted = self.x6 - np.max(self.x6, axis=1, keepdims=True)
        exp = np.exp(shifted)
        self.softmax = exp / np.sum(exp, axis=1, keepdims=True)

        return self.softmax

    # training with full-batch gradient descent
    def fit(self, data, y, learning_rate=1, epochs=1):
        '''
        Train the network parameters.

        :param data: (batch, 3) training inputs.
        :param y: (batch, 3) one-hot training labels.  BUGFIX: labels passed
            by the caller are now used directly; the original ignored this
            parameter and recomputed labels from the (already normalized)
            inputs, which disagreed with the labels the test set was built on.
        :param learning_rate: gradient-descent step size.
        :param epochs: number of full-batch update steps.
        :return: (epochs,) array of the cross-entropy loss per epoch.
        '''
        loss = np.zeros(epochs)

        for epoch in range(epochs):
            # Forward pass and cross-entropy loss.
            y_pred = self.call(data)
            loss[epoch] = -np.sum(y * np.log(y_pred))

            # ==========
            # Backward propagation
            # ==========
            # Combined softmax + cross-entropy gradient w.r.t. logits x6.
            dx6 = y_pred - y
            # x6 = tanh(x5)  =>  d tanh = 1 - tanh(x5)^2 = 1 - x6^2.
            dx5 = dx6 * (1 - self.x6 ** 2)

            dx4 = dx5 @ self.w4.T
            dw4 = self.x4.T @ dx5

            dx3 = dx4 @ self.w3.T
            dw3 = self.x3.T @ dx4

            # x3 = tanh(x2)  =>  derivative is 1 - x3^2.
            # BUGFIX: the original used 1 - tanh(x3)^2, applying tanh twice.
            dx2 = dx3 * (1 - self.x3 ** 2)
            dw2 = self.x1.T @ dx2

            dx1 = dx2 @ self.w2.T
            dw1 = data.T @ dx1

            # Gradient-descent update of w1..w4.
            self.w1 -= learning_rate * dw1
            self.w2 -= learning_rate * dw2
            self.w3 -= learning_rate * dw3
            self.w4 -= learning_rate * dw4

        return loss


# =============================================================
# Generate training set and test set
# =============================================================
def create_dataSet(dataSize, ratio):
    """Draw `dataSize` points uniformly in [-3, 3]^2, label each into one of
    three regions, and split into train/test parts by `ratio`.

    Rows of the data are (x, y, 0); labels are one-hot vectors of length 3.
    Returns (trainData, testData, trainLabel, testLabel).
    """
    all_data = np.zeros((dataSize, 3))
    all_data[:, 0:2] = np.random.uniform(-3, 3, (dataSize, 2))

    xs = all_data[:, 0]
    ys = all_data[:, 1]
    # Class 0: inside the diamond |x - 1.5| + |y| < 1.
    in_diamond = np.abs(xs - 1.5) + np.abs(ys) < 1
    # Class 1: outside the unit circle centred at (-1, 1); class 2 otherwise.
    outside_circle = (xs + 1) ** 2 + (ys - 1) ** 2 > 1

    classes = np.where(in_diamond, 0, np.where(outside_circle, 1, 2))
    labels = np.zeros((dataSize, 3))
    labels[np.arange(dataSize), classes] = 1

    split = int(dataSize * ratio)
    return (all_data[:split, :], all_data[split:dataSize, :],
            labels[:split, :], labels[split:dataSize, :])

# =============================================================
# Get label of the data
# =============================================================
def get_label(train_data, size):
    """One-hot label for each of the first `size` rows of `train_data`.

    Class 0: inside the diamond |x - 1.5| + |y| < 1.
    Class 1: outside the unit circle centred at (-1, 1).
    Class 2: otherwise (inside that circle, not in the diamond).
    """
    xs = train_data[:size, 0]
    ys = train_data[:size, 1]
    classes = np.where(np.abs(xs - 1.5) + np.abs(ys) < 1, 0,
                       np.where((xs + 1) ** 2 + (ys - 1) ** 2 > 1, 1, 2))
    one_hot = np.zeros((size, 3))
    one_hot[np.arange(size), classes] = 1
    return one_hot

# =============================================================
# Normalize the data
# =============================================================
def StandardScale(data):
    """Mean-center `data` and scale it by its value range.

    :param data: numpy array (any shape).
    :return: (data - mean) / (max - min), statistics over all elements.

    BUGFIX: the original used the builtin max()/min(), which raises
    ValueError ("truth value of an array is ambiguous") on 2-D arrays;
    np.max/np.min handle any shape.
    """
    avg = np.mean(data)
    spread = np.max(data) - np.min(data)
    return (data - avg) / spread

def accuracy_score(pred, real):
    """Return the fraction of positions where `pred` equals `real`.

    :param pred: 1-D array of predicted class labels.
    :param real: 1-D array of true class labels (same length).
    """
    hits = int(np.count_nonzero(pred == real))
    return hits / pred.shape[0]

# =============================================================
# Main
# =============================================================

# Generate the data set (75% train / 25% test).
x_train, x_test, y_train, y_test = create_dataSet(500, 0.75)

# Normalize coordinates into [-1, 1] (points were drawn in [-3, 3]).
x_train = x_train/3
x_test = x_test/3

# Training hyper-parameters.
epochs = 10000
learning_rate = 1e-5

mymodel = NN()

loss = mymodel.fit(x_train, y_train, learning_rate, epochs)
out = mymodel.call(x_test)

# Convert each sample's 3-dim probability vector to a class label.
out3 = np.argmax(out, axis=1)
y_test3 = np.argmax(y_test, axis=1)

#############################
# BUGFIX: printed message said "accuray"; corrected to "accuracy".
print("accuracy", accuracy_score(out3, y_test3))

pl.plot(loss)
pl.xlabel('Epoch number')
pl.ylabel('error (cross entropy)')
pl.show()
# pl.savefig("loss.png")


