
import random
import numpy as np
import pandas as pd
import copy
import time
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt

class Network(object):

    def __init__(self, sizes):
        '''Simple feed-forward neural network.

        ``sizes`` lists the number of neurons in each layer. For example
        ``[2, 3, 1]`` builds a 3-layer network with 2 input neurons,
        3 hidden neurons and 1 output neuron. Biases and weights are
        initialised from a standard normal distribution (mean 0,
        variance 1). The first layer is assumed to be the input layer
        and gets no biases, since biases are only used when computing
        the outputs of later layers.
        '''
        self.num_layers = len(sizes)
        self.sizes = sizes
        # One (n, 1) bias column vector per non-input layer.
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        # Weight matrix from layer i to layer i+1 has shape (sizes[i+1], sizes[i]).
        self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]

        # Helper counts used by the genetic algorithm to size its
        # crossover/mutation sampling loops.
        self.bias_nitem = sum(sizes[1:])
        # BUGFIX: the original summed only the first num_layers-2 weight
        # matrices (range(num_layers-2)), silently excluding the final
        # layer's weights from the count. Count every matrix instead.
        self.weight_nitem = sum(w.size for w in self.weights)

    def feedforward(self, a):
        '''Return the output of the network if ``a`` is the input.

        ``a`` is expected to be a column vector of shape (sizes[0], 1).
        ReLU is used as the activation for every layer; ``sigmoid`` and
        ``tanh`` are available as drop-in alternatives.
        '''
        for bias, weight in zip(self.biases, self.weights):
            a = self.ReLu(np.dot(weight, a) + bias)
        return a

    def sigmoid(self, z):
        '''Logistic sigmoid activation.'''
        return 1.0/(1.0+np.exp(-z))

    def tanh(self, z):
        '''Hyperbolic tangent activation.'''
        return (np.exp(z) - np.exp(-z)) / (np.exp(z) + np.exp(-z))

    def ReLu(self, z):
        '''Rectified linear unit: max(z, 0), computed element-wise.'''
        return (z + np.abs(z)) / 2.0

    def score(self, X, y):
        '''Sum of per-sample squared-error losses over the dataset.

        @X = input data, one sample per row
        @y = matching labels (one-hot rows)
        @returns = total loss (lower is better)
        @ref: https://stats.stackexchange.com/questions/154879/a-list-of-cost-functions-used-in-neural-networks-alongside-applications
        '''
        total_score = 0
        for i in range(X.shape[0]):
            # Forward pass on a single sample (as a column vector).
            predicted = self.feedforward(X[i].reshape(-1, 1))
            actual = y[i].reshape(-1, 1)
            # Half mean-squared-error summed over output neurons.
            total_score += np.sum(np.power(predicted - actual, 2) / 2)
        return total_score

    def accuracy(self, X, y):
        '''Classification accuracy over the dataset.

        @X = input data, one sample per row
        @y = matching one-hot labels
        @returns = accuracy in percent (higher is better)
        '''
        accuracy = 0
        for i in range(X.shape[0]):
            output = self.feedforward(X[i].reshape(-1, 1))
            # A prediction counts as correct when the arg-max of the
            # network output matches the arg-max of the one-hot label.
            accuracy += int(np.argmax(output) == np.argmax(y[i]))
        return accuracy / X.shape[0] * 100

    def __str__(self):
        s = "\nBias:\n\n" + str(self.biases)
        s += "\nWeights:\n\n" + str(self.weights)
        s += "\n\n"
        return s

class NNGeneticAlgo:

    def __init__(self, n_pops, net_size, mutation_rate, crossover_rate, retain_rate, X, y):
        '''
        n_pops   = population size (number of networks evolved together)
        net_size = layer sizes for each network, e.g. [4, 6, 5, 3]
        mutation_rate = probability of mutating each sampled weight/bias
        crossover_rate = probability of inheriting each sampled weight/bias from the mother
        retain_rate = fraction of best members kept between generations
        X = data used to evaluate fitness/accuracy
        y = matching one-hot labels
        '''
        self.n_pops = n_pops
        self.net_size = net_size
        self.nets = [Network(self.net_size) for i in range(self.n_pops)]
        self.mutation_rate = mutation_rate
        self.crossover_rate = crossover_rate
        self.retain_rate = retain_rate
        self.X = X[:]
        self.y = y[:]

    def get_random_point(self, type):
        '''Pick a random parameter position in the (shared) network shape.

        @type = 'weight' or 'bias' (name kept for call compatibility,
                although it shadows the builtin)
        @returns tuple (layer_index, point_index)
            Note: for 'weight' the point index is a (row, col) tuple.
        '''
        # All networks share the same architecture, so any member's
        # shapes describe the whole population.
        nn = self.nets[0]
        layer_index, point_index = random.randint(0, nn.num_layers-2), 0
        if type == 'weight':
            row = random.randint(0, nn.weights[layer_index].shape[0]-1)
            col = random.randint(0, nn.weights[layer_index].shape[1]-1)
            point_index = (row, col)
        elif type == 'bias':
            point_index = random.randint(0, nn.biases[layer_index].size-1)
        return (layer_index, point_index)

    def get_all_scores(self):
        '''Loss of every member on the evaluation data (lower is better).'''
        return [net.score(self.X, self.y) for net in self.nets]

    def get_all_accuracy(self):
        '''Accuracy (%) of every member on the evaluation data.'''
        return [net.accuracy(self.X, self.y) for net in self.nets]

    def crossover(self, father, mother):
        '''
        @father = network object acting as the first parent
        @mother = network object acting as the second parent
        @returns = a new child network combining both parents' genes
        '''
        # Start from a deep copy of the father's weights and biases.
        nn = copy.deepcopy(father)

        # Crossover on biases: each trial overwrites one randomly chosen
        # bias with the mother's value with probability crossover_rate.
        for _ in range(self.nets[0].bias_nitem):
            layer, point = self.get_random_point('bias')
            if random.uniform(0, 1) < self.crossover_rate:
                nn.biases[layer][point] = mother.biases[layer][point]

        # Crossover on weights, same scheme.
        for _ in range(self.nets[0].weight_nitem):
            layer, point = self.get_random_point('weight')
            if random.uniform(0, 1) < self.crossover_rate:
                nn.weights[layer][point] = mother.weights[layer][point]

        return nn

    def mutation(self, child):
        '''
        @child = network object whose internal weights & biases to mutate
        @returns = a newly mutated copy of the network
        '''
        nn = copy.deepcopy(child)

        # Mutate biases: perturb randomly chosen entries.
        for _ in range(self.nets[0].bias_nitem):
            layer, point = self.get_random_point('bias')
            # Add a random offset in [-0.5, 0.5] with probability mutation_rate.
            if random.uniform(0, 1) < self.mutation_rate:
                nn.biases[layer][point] += random.uniform(-0.5, 0.5)

        # Mutate weights, same scheme.
        for _ in range(self.nets[0].weight_nitem):
            layer, point = self.get_random_point('weight')
            if random.uniform(0, 1) < self.mutation_rate:
                nn.weights[layer][point[0], point[1]] += random.uniform(-0.5, 0.5)

        return nn

    def evolve(self):
        '''Advance the population by one generation (selection + breeding).'''
        # Score every member of the current population.
        score_list = list(zip(self.nets, self.get_all_scores()))

        # Sort members best-first (lowest loss first).
        score_list.sort(key=lambda x: x[1])

        # Drop the scores; only the ranking matters from here on.
        score_list = [obj[0] for obj in score_list]

        # Keep the top retain_rate fraction unchanged (elitism).
        retain_num = int(self.n_pops * self.retain_rate)
        score_list_top = score_list[:retain_num]

        # Also keep a few random non-elite members to preserve diversity.
        retain_non_best = int((self.n_pops-retain_num) * self.retain_rate)
        for _ in range(random.randint(0, retain_non_best)):
            score_list_top.append(random.choice(score_list[retain_num:]))

        # Breed children until the population is back to full size.
        while len(score_list_top) < self.n_pops:
            father = random.choice(score_list_top)
            mother = random.choice(score_list_top)
            # `is not` makes the identity comparison explicit (Network has
            # no __eq__, so the original `!=` compared identity anyway).
            # BUGFIX: if only one parent survived, the original condition
            # could never hold and this loop spun forever — allow
            # self-crossover in that degenerate case.
            if father is not mother or len(score_list_top) == 1:
                new_child = self.crossover(father, mother)
                new_child = self.mutation(new_child)
                score_list_top.append(new_child)

        # Install the new generation for the next evolve() call.
        self.nets = score_list_top

def main():
    '''Evolve a population of networks on the iris dataset and plot accuracy.'''
    # Load features (all but last column) and labels (last column).
    df = pd.read_csv("iris.csv")
    X = df.iloc[:, :-1].values
    y = df.iloc[:, -1].values

    # One-hot encode the class labels.
    y = y.reshape(-1, 1)
    enc = OneHotEncoder()
    enc.fit(y)
    y = enc.transform(y).toarray()

    # Hyper-parameters for the genetic algorithm.
    N_POPS = 30
    NET_SIZE = [4, 6, 5, 3]
    MUTATION_RATE = 0.2
    CROSSOVER_RATE = 0.4
    RETAIN_RATE = 0.4

    # Build the population and optimise it with the genetic algorithm.
    nnga = NNGeneticAlgo(N_POPS, NET_SIZE, MUTATION_RATE, CROSSOVER_RATE, RETAIN_RATE, X, y)

    start_time = time.time()
    max_accuracy = 0
    iteration_cnt_list = []
    accuracy_list = []
    plt.ion()
    plt.figure(1)

    # Run n generations.
    for i in range(1000):
        # PERF: the original called get_all_accuracy() up to three times
        # per generation; each call runs a full forward pass over the
        # whole dataset for every network. Compute it once per generation.
        top_accuracy = nnga.get_all_accuracy()[0]
        if top_accuracy > max_accuracy:
            max_accuracy = top_accuracy
        if i % 10 == 0:
            print("Current iteration : {}".format(i+1))
            print("Time taken by far : %.1f seconds" % (time.time() - start_time))
            print("Current top member's network accuracy: %.2f%%\n" % top_accuracy)
            iteration_cnt_list.append(i)
            accuracy_list.append(top_accuracy)
            # Live-update the accuracy trend plot.
            plt.plot(iteration_cnt_list, accuracy_list, c='r', ls='-', marker='o', mec='b', mfc='w')
            plt.pause(0.1)
        # Advance the population by one generation.
        nnga.evolve()

    print("The max top member's network accuracy: %.2f%%\n" % max_accuracy)
    plt.pause(1000)

# Run the demo only when executed as a script (not when imported).
if __name__ == "__main__":
    main()