import math
import numpy as np
from sklearn import preprocessing
import xlsxwriter
import matplotlib.pyplot as plt


class NeuralNetwork(object):
    """A small fully-connected BP (back-propagation) neural network.

    Built for the seeds dataset: 7 input features, one or more hidden
    layers with Leaky-ReLU activations, and a 3-class softmax output
    trained with a cross-entropy loss.
    """

    def __init__(self):
        # one weight matrix per layer, shape (n_out, n_in)
        self.hidden_layers_weights = []
        # one bias vector per layer (hidden layers plus the output layer)
        self.hidden_layers_bias = []
        # per-layer error terms from the last backward pass (output layer first)
        self.error = []
        # per-sample losses accumulated over the current epoch
        self.loss_average = []
        # post-activation outputs of every layer from the last forward pass
        self.train_outputs = []
        # pre-activation (normalized) values of every layer from the last forward pass
        self.train_inputs = []
        self.data = []
        # average loss per epoch, for plotting
        self.loss_average_plot = []
        self.test_outputs = []
        self.test_inputs = []
        self.learning_rate = 0.0001
        self.min_max_scaler = preprocessing.MinMaxScaler()
        self.batch_size = 30
        self.iteration = 7
        self.epoch = 30

    def set_param(self, batch_size=30, iteration=7, epoch=30):
        """Configure the training-loop sizes."""
        self.batch_size = batch_size
        self.iteration = iteration
        self.epoch = epoch

    def initial_bp_neural_network(self, n_inputs, n_hidden_layers, n_outputs):
        """Initialize weights and biases for the network.

        Param:
        -----------
        n_inputs:           number of input features
        n_hidden_layers:    list; element i is the neuron count of hidden layer i
        n_outputs:          number of output neurons

        Returns the list of weight matrices.
        """
        # At least one hidden layer is required; every neuron has one bias.
        if len(n_hidden_layers) > 0:
            # Small Gaussian initialization throughout.  (Bug fix: the original
            # mixed randn and rand — rand*0.01 yields only non-negative weights.)
            self.hidden_layers_weights.append(np.random.randn(n_hidden_layers[0], n_inputs) * 0.01)
            for i in range(len(n_hidden_layers) - 1):
                self.hidden_layers_weights.append(np.random.randn(n_hidden_layers[i + 1], n_hidden_layers[i]) * 0.01)
            for layer_size in n_hidden_layers:
                self.hidden_layers_bias.append(np.zeros(layer_size))
            self.hidden_layers_weights.append(np.random.randn(n_outputs, n_hidden_layers[-1]) * 0.01)
            self.hidden_layers_bias.append(np.zeros(n_outputs))
        return self.hidden_layers_weights

    def forward_propagate(self, inputs):
        """Run one forward pass, recording per-layer pre- and post-activations.

        Returns the list of every layer's activated outputs; the last element
        is the network's final output.
        """
        self.train_inputs = []
        self.train_outputs = []
        function_vector = np.vectorize(Leaky_ReLu)
        for i in range(len(self.hidden_layers_weights)):
            outputs = np.array(self.hidden_layers_weights[i]).dot(inputs)
            outputs = np.array(outputs) + np.array(self.hidden_layers_bias[i])
            outputs = np.array(self.batch_normalization(outputs))
            self.train_inputs.append(outputs)
            outputs = function_vector(outputs)
            # record each layer's output
            self.train_outputs.append(outputs)
            inputs = outputs.copy()
        return self.train_outputs

    def backward_error_propagate(self, outputs):
        """Back-propagate the error for one sample.

        `outputs` is the one-hot encoded target vector.  self.error is filled
        in reverse order (output layer first).
        """
        self.error = []
        function_vector = np.vectorize(Leaky_ReLu_derivative)
        # train_outputs holds every layer's output; the last entry is the final result
        for i in range(len(self.train_outputs)):
            if i == 0:
                # Output layer: derivative of the softmax cross-entropy loss
                # times the derivative of the activation.
                self.error.append(error_function_derivative(outputs, self.soft_max()) *
                                  function_vector(np.array(self.train_inputs[-1])))
                self.loss_average.append(self.error_function(outputs, self.soft_max()))
            else:
                # Hidden layers: activation derivative (elementwise) times the
                # weighted sum of the next layer's errors.
                self.error.append(function_vector(np.array(self.train_inputs[len(self.train_inputs) - 1 - i])) *
                                  self.hidden_layers_weights[len(self.train_outputs) - i].T.dot(self.error[i - 1]))

    def update(self, inputs):
        """Gradient-descent update of all weights and biases for one sample.

        NOTE(review): the update also adds the current weight matrix inside
        the learning-rate term, which acts like an L2 weight-decay penalty
        with coefficient 1 — confirm this is intentional.
        """
        for i in range(len(self.hidden_layers_weights)):
            # self.error is stored output-layer-first, so index it from the back
            layer_error = self.error[len(self.error) - i - 1]
            if i == 0:
                layer_inputs = np.array(inputs)
            else:
                layer_inputs = np.array(self.train_outputs[i - 1])
            # outer product: (n_out, 1) x (1, n_in) -> per-weight gradient
            gradient = layer_error.reshape(len(layer_error), 1) * layer_inputs.reshape(1, len(layer_inputs))
            self.hidden_layers_weights[i] = self.hidden_layers_weights[i] - self.learning_rate * (
                    gradient + np.array(self.hidden_layers_weights[i]))
        for i in range(len(self.hidden_layers_bias)):
            self.hidden_layers_bias[i] = self.hidden_layers_bias[i] - self.learning_rate * (self.error[len(self.error) - i - 1])

    # Cross-entropy loss for one sample.
    def error_function(self, actual_outputs, predict_outputs):
        """Return -sum(actual * log(predicted)).

        Probabilities are clamped away from zero so log(0) cannot raise
        (bug fix: the original called math.log directly and crashed when a
        softmax probability underflowed to 0).
        """
        eps = 1e-12
        return -sum(a * math.log(max(p, eps)) for a, p in zip(actual_outputs, predict_outputs))

    # Convergence check: every final output must match the expected output
    # within a tolerance (the precision can be tuned).
    def judge_predict(self):
        final_outputs = self.train_outputs[-1]
        for i in range(len(final_outputs)):
            if abs(final_outputs[i] - self.test_outputs[i]) > 0.00001:
                return 0
        return 1

    # Classification check: returns the label when the network's argmax class
    # matches it, otherwise 0.  Labels are 1-based (1..3).
    def judge_classification(self, label):
        probabilities = self.soft_max()
        max_index = probabilities.index(max(probabilities))
        if max_index + 1 == label:
            return label
        return 0

    # Convert the final layer's raw outputs into probabilities.
    def soft_max(self):
        """Softmax of the last layer's outputs, returned as a plain list.

        Generalized to any number of output neurons (the original hard-coded
        three) and shifted by the maximum so exp() cannot overflow; the shift
        does not change the mathematical result.
        """
        z = self.train_outputs[-1]
        shift = max(z)
        exps = [math.exp(v - shift) for v in z]
        total = sum(exps)
        return [e / total for e in exps]

    # Train the network.
    def train_network(self):
        """Train on the seeds dataset and log per-epoch accuracy to data.xlsx."""
        file_path = "..//test_file//seeds_dataset.txt"
        self.data = read_data(file_path)

        # workbook that records the per-class accuracy of every epoch
        workbook = xlsxwriter.Workbook('data.xlsx')
        worksheet = workbook.add_worksheet()

        for _epoch in range(self.epoch):
            # Reshuffle so every epoch after the first sees a different order.
            # (Bug fix: the original tested `_epoch != 1`, which skipped the
            # shuffle on the *second* epoch instead of the first.)
            if _epoch != 0:
                np.random.shuffle(self.data)

            # inputs: the first 7 columns, min-max scaled
            self.test_inputs = [row[0:7] for row in self.data]
            self.test_inputs = self.min_max_scaler.fit_transform(self.test_inputs)

            # outputs: the class label in column 8
            self.test_outputs = [row[7] for row in self.data]
            # training one batch is one iteration
            for i in range(self.iteration):
                # one iteration updates the network's parameters
                for j in range(self.batch_size):
                    sample = j + i * self.batch_size
                    self.forward_propagate(self.test_inputs[sample])
                    # one-hot encode the target label
                    if self.test_outputs[sample] == 1:
                        self.backward_error_propagate(np.array([1, 0, 0]))
                    elif self.test_outputs[sample] == 2:
                        self.backward_error_propagate(np.array([0, 1, 0]))
                    else:
                        self.backward_error_propagate(np.array([0, 0, 1]))
                    self.update(self.test_inputs[sample])
            n1, n2, n3 = 0, 0, 0
            # evaluate the trained network on the full set
            print('--------开始测试集的工作--------')
            for j in range(len(self.test_outputs)):
                self.forward_propagate(self.test_inputs[j])
                # call judge_classification once per sample
                # (the original called it up to three times)
                matched = self.judge_classification(self.test_outputs[j])
                if matched == 1:
                    n1 += 1
                elif matched == 2:
                    n2 += 1
                elif matched == 3:
                    n3 += 1
            # assumes the seeds dataset holds 70 samples per class — TODO confirm
            acc1 = n1 / 70
            acc2 = n2 / 70
            acc3 = n3 / 70
            worksheet.write(_epoch, 0, acc1)
            worksheet.write(_epoch, 1, acc2)
            worksheet.write(_epoch, 2, acc3)
            print('第' + str(_epoch+1) + "次迭代")
            print('准确率为:')
            print(acc1)
            print(acc2)
            print(acc3)
            self.loss_average_plot.append(np.sum(np.array(self.loss_average)) / (self.iteration * self.batch_size))
            self.loss_average = []
        self.loss_plot()
        workbook.close()

    def predict(self, test_x):
        """Forward one sample and return its predicted class label (1-based).

        Bug fixes vs. the original: the softmax is taken over the *last*
        layer's outputs (not layer index 1), and the argmax is taken on that
        same probability vector instead of searching a separately computed
        list for a possibly non-matching float (which could raise ValueError).
        """
        layer_outputs = self.forward_propagate(test_x)
        probabilities = softmax(layer_outputs[-1])
        # +1 converts the 0-based argmax to the dataset's 1-based labels,
        # matching judge_classification
        return int(np.argmax(probabilities)) + 1

    def loss_plot(self):
        """Plot the per-epoch average loss."""
        fig, ax = plt.subplots()
        # line width of the plotted curve
        ax.plot(self.loss_average_plot, linewidth=3)
        # chart title and axis labels
        ax.set_title("Loss Average by an Iteration", fontsize=24)
        ax.set_xlabel("Epoch", fontsize=14)
        ax.set_ylabel("Loss Average", fontsize=14)
        plt.xticks(range(0, len(self.loss_average_plot)))
        plt.show()

    def batch_normalization(self, outputs):
        """Standardize a layer's pre-activations to zero mean and unit variance.

        An epsilon under the square root guards against a zero-variance
        vector (bug fix: the original divided by sqrt(var) directly, which
        raises ZeroDivisionError when all activations are equal).
        """
        arr = np.asarray(outputs, dtype=float)
        arr_mean = np.mean(arr)
        arr_std = math.sqrt(np.var(arr) + 1e-8)
        return list((arr - arr_mean) / arr_std)


def read_data(file_path):
    """Read a whitespace-separated numeric dataset into a 2-D numpy array."""
    rows = []
    with open(file_path, "r") as handle:
        for raw_line in handle:
            tokens = raw_line.strip().split()
            rows.append([float(token) for token in tokens])
    return np.array(rows)


def str2float(_data):
    """Convert a sequence of numeric strings into a list of floats."""
    return [float(item) for item in _data]


# Derivative of the softmax cross-entropy loss with respect to the output
# layer: predicted probabilities minus the one-hot targets.
# The argument order must not be swapped.
def error_function_derivative(actual_outputs, predict_outputs):
    """Return predict_outputs - actual_outputs (output-layer loss gradient)."""
    return -(actual_outputs - predict_outputs)


# Leaky-ReLU activation function.
def Leaky_ReLu(x):
    """Identity for positive x; 0.01 * x otherwise."""
    return x if x > 0 else 0.01 * x


def sigmoid(x):
    """Logistic sigmoid: 1 / (1 + e^(-x))."""
    denominator = 1 + math.exp(-x)
    return 1 / denominator


def tanh(x):
    """Hyperbolic tangent activation.

    Computed with math.tanh rather than the 2*sigmoid(2*x)-1 identity used
    before: that identity raises OverflowError for large negative x (since
    sigmoid evaluates math.exp(-2*x)) and loses precision near saturation,
    while math.tanh is accurate and saturates cleanly at +/-1.
    """
    return math.tanh(x)


# Derivative of the Leaky-ReLU activation function.
def Leaky_ReLu_derivative(x):
    """Slope of Leaky ReLU: 1 on the positive side, 0.01 otherwise."""
    return 1 if x > 0 else 0.01


def softmax(z):
    """Numerically stable softmax of a vector.

    Shifting by max(z) before exponentiating prevents overflow for large
    inputs (the original np.exp(z) overflows to inf for z around 710+) and
    does not change the mathematical result.
    """
    e_z = np.exp(z - np.max(z))
    return e_z / np.sum(e_z)


if __name__ == '__main__':
    # Build a network with 7 input features, one hidden layer of 11 neurons
    # and 3 output classes, train it, then classify one sample.
    net = NeuralNetwork()
    net.initial_bp_neural_network(7, [11], 3)
    net.set_param()
    net.train_network()
    sample = [11.84, 13.21, 0.8521, 5.175, 2.836, 3.598, 5.044]
    prediction = net.predict(sample)
    print(prediction)



