import copy

import numpy as np
from matplotlib.pyplot import *


# 创建一个NN类，保存neural network的各种参数，类中只创建了空白变量
class NN:
    """Parameter container for a small feed-forward neural network.

    Only creates empty holders; the actual weight matrices are filled in by
    the caller before training.
    """

    def __init__(self):
        self.learning_rate = 0.001  # gradient-descent step size
        self.error_rate = 0  # most recent value of the loss function E
        # Neuron values: a[0] input, a[1] hidden pre-activation aj,
        # a[2] zj = tanh(aj), a[3] final network output yk.
        self.a = {}
        # Weight matrices: W[0] = wji (input->hidden), W[1] = wkj (hidden->output).
        self.W = {}
        # Gradients of the loss w.r.t. W[0] and W[1], same keys as self.W.
        self.W_grad = {}
        # Error terms: delta['δk'] output-layer delta, delta['δj'] hidden-layer delta.
        self.delta = {}


def data_preproces(file):
    """Load the Iris data set, standardise it, shuffle it, and add a bias column.

    Parameters
    ----------
    file : str
        Path to the iris data file; each row is
        "sepal_len,sepal_wid,petal_len,petal_wid,species".

    Returns
    -------
    tuple(np.ndarray, np.ndarray)
        data: (150, 5) standardised features with a constant-1 bias column
        appended as column 4; label: (150, 3) one-hot class labels.
        Rows of the two arrays are shuffled with the same permutation.
    """
    # Four features: 1. sepal length 2. sepal width 3. petal length 4. petal width
    data = np.zeros((150, 4), dtype=float)
    label = np.zeros((150, 3), dtype=float)  # one-hot labels

    # BUG FIX: the original ignored the `file` argument and opened './iris.data'.
    with open(file) as my_data:
        i = 0
        for line in my_data:
            fields = line.split(',')
            # Skip blank/short lines instead of crashing on them.
            if len(fields) < 5:
                continue
            data[i] = fields[:4]
            # FIX: strip() so the comparison also works on a last line
            # without a trailing newline (original compared 'Iris-setosa\n').
            species = fields[4].strip()
            if species == 'Iris-setosa':
                label[i] = np.array([1, 0, 0])
            elif species == 'Iris-versicolor':
                label[i] = np.array([0, 1, 0])
            else:
                label[i] = np.array([0, 0, 1])
            i += 1
            if i == 150:  # the canonical iris file has exactly 150 data rows
                break

    # Standardisation: zero mean, unit variance per feature column.
    mean = np.mean(data, axis=0)
    sigma = np.std(data, axis=0)
    data = (data - mean) / sigma

    # Step 3: shuffle the data set randomly. Re-seeding with the same seed
    # before each shuffle keeps data rows and label rows paired up.
    seed = np.random.randint(0, 10)
    np.random.seed(seed)
    np.random.shuffle(data)
    np.random.seed(seed)
    np.random.shuffle(label)

    # Augment each feature vector with a constant 1 (bias input) as column 4.
    data = np.insert(data, 4, 1, axis=1)

    return data, label


def nn_forward(nn, train_x, train_y):
    """Forward pass: push one sample through the network and compute the loss.

    Stores the intermediate activations in nn.a:
      a[0] input, a[1] hidden pre-activation aj, a[2] zj = tanh(aj),
      a[3] linear output yk.  The squared-error loss is saved in
      nn.error_rate.  Returns nn.

    Assumes train_x/train_y are column vectors matching nn.W's shapes.
    """
    nn.a[0] = train_x  # input layer (bias already appended by preprocessing)
    nn.a[1] = np.dot(nn.W[0], train_x)  # hidden pre-activations aj
    nn.a[2] = np.tanh(nn.a[1])  # hidden activations zj = tanh(aj)
    nn.a[3] = np.dot(nn.W[1], nn.a[2])  # linear output layer yk
    # Loss E = 1/2 * sum_k (yk - tk)^2.
    # FIX: np.sum replaces the builtin double sum() — same value, idiomatic and faster.
    nn.error_rate = 0.5 * np.sum((nn.a[3] - train_y) ** 2)
    return nn


def nn_backpropagation(nn, train_y):
    """Backward pass: compute the error terms and the weight gradients.

    Uses the activations left in nn.a by nn_forward.  Fills nn.delta with
    the output/hidden error terms and nn.W_grad with dE/dW for both layers,
    then returns nn.
    """
    # Output-layer error term δk (derivative of E w.r.t. the linear output yk).
    output_delta = nn.a[3] - train_y
    # Hidden-layer error term δj, backpropagated through W[1];
    # (1 - zj^2) is the derivative of tanh.
    hidden_delta = np.dot(nn.W[1].T, output_delta) * (1 - nn.a[2] ** 2)
    nn.delta['δk'] = output_delta
    nn.delta['δj'] = hidden_delta
    # Gradients: outer products of the error terms with the layer inputs.
    nn.W_grad[1] = np.dot(output_delta, nn.a[2].T)  # hidden->output weights
    nn.W_grad[0] = np.dot(hidden_delta, nn.a[0].T)  # input->hidden weights
    return nn


def nn_applygradient(nn):
    """One gradient-descent step: W <- W - learning_rate * dE/dW per layer."""
    for layer in (0, 1):
        nn.W[layer] = nn.W[layer] - nn.learning_rate * nn.W_grad[layer]
    return nn


def nn_test(nn, xTesting, yTesting):
    """Evaluate the network on the test set and return the accuracy ratio.

    Each row of xTesting/yTesting is reshaped to a column vector, run through
    nn_forward, and the predicted class (argmax of the output) is compared
    against the one-hot label.
    """
    wrongs = 0
    for sample_x, sample_y in zip(xTesting, yTesting):
        col_x = sample_x.reshape(5, 1)  # feature column vector (incl. bias)
        col_y = sample_y.reshape(3, 1)  # one-hot label column vector
        nn_forward(nn, col_x, col_y)  # forward pass fills nn.a[3]
        predicted = np.argmax(nn.a[3])  # index of the strongest output unit
        expected = np.argmax(col_y)
        if predicted != expected:
            wrongs += 1
    return 1 - wrongs / yTesting.shape[0]


def draw(totalErrorRate, totalAccuracy):
    """Plot the per-epoch training error and test accuracy in two stacked subplots."""
    panels = [
        (totalErrorRate, "Error Rate on the Training Set"),
        (totalAccuracy, 'Accuracy on the Test Set'),
    ]
    for position, (series, caption) in enumerate(panels, start=1):
        subplot(2, 1, position)
        plot(series, color='red')
        title(caption)

    tight_layout()  # auto-adjust subplot spacing so titles don't overlap
    show()


if __name__ == '__main__':
    # Build the data set: standardised features plus one-hot labels.
    data, label = data_preproces('iris.data')

    # Hold out the last 20% of the (already shuffled) rows as the test set.
    ratioTraining = 0.8
    split = int(150 * ratioTraining)
    xTraining = data[:split]
    yTraining = label[:split]
    xTesting = data[split:]
    yTesting = label[split:]

    # Step 1: initialise the network weights W and the learning rate.
    nn = NN()
    nn.learning_rate = 0.001
    # Xavier-style uniform initialisation scaled by sqrt(6 / (fan_in + fan_out + 1)).
    nn.W[0] = (np.random.rand(20, 5) - 0.5) * 2 * np.sqrt(6 / (4 + 20 + 1))  # input->hidden (20x5)
    nn.W[1] = (np.random.rand(3, 20) - 0.5) * 2 * np.sqrt(6 / (3 + 20 + 1))  # hidden->output (3x20)

    maxAccuracy = 0  # best test accuracy observed so far
    totalAccuracy = []  # per-epoch test-set accuracy
    totalErrorRate = []  # per-epoch final training loss (error_rate)
    maxIteration = 50  # number of training epochs

    # Step 2: for t = 1..T (epochs).
    for iteration in range(maxIteration):
        # Step 4: for n = 1..N — per-sample (stochastic) gradient descent.
        for i in range(len(xTraining)):
            # Step 5: choose the pattern xn, as column vectors.
            train_x = xTraining[i].reshape(5, 1)
            train_y = yTraining[i].reshape(3, 1)
            # Step 6: forward the input through the network.
            nn = nn_forward(nn, train_x, train_y)
            # Step 7: backpropagate the error to obtain the gradients.
            nn = nn_backpropagation(nn, train_y)
            # Step 8: update the weights.
            nn = nn_applygradient(nn)

        totalErrorRate.append(nn.error_rate)  # loss of the epoch's last sample
        accuracy = nn_test(nn, xTesting, yTesting)
        totalAccuracy.append(accuracy)

        # Keep a snapshot of the best model so far.
        # BUG FIX: the original did `storedNN = nn`, which merely aliased the
        # live network — later training mutated the "stored" model too.
        # deepcopy freezes the weights; `>=` also guarantees storedNN is bound
        # after the first epoch (previously it could stay undefined).
        if accuracy >= maxAccuracy:
            maxAccuracy = accuracy
            storedNN = copy.deepcopy(nn)

        print("Accuracy on the Test Set:      ", accuracy)
        print("Error Rate on the Training Set:", totalErrorRate[iteration])

    draw(totalErrorRate, totalAccuracy)
