#coding:utf-8


import random
import numpy as np

class Network(object):
    '''
    Fully-connected feed-forward neural network trained with
    mini-batch stochastic gradient descent and back-propagation.
    '''

    def __init__(self, sizes):
        '''
        :param sizes: neurons per layer, e.g. [2, 3, 1] describes a
            3-layer network with 2 input neurons, 3 hidden neurons and
            1 output neuron.
        '''
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.costs = []   # accumulated squared-error cost, one entry per epoch
        self.step = -1    # index of the current epoch inside self.costs
        # No biases for the input layer; weights[i] has shape
        # (neurons in layer i+1, neurons in layer i), drawn from N(0, 1).
        self.bias = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feed_forward(self, datas):
        '''
        Forward pass: propagate the input through every layer.

        :param datas: input column vector of shape (sizes[0], 1)
        :return: activation vector of the output layer
        '''
        for b, w in zip(self.bias, self.weights):
            datas = sigmod(np.dot(w, datas) + b)
        return datas

    def SGD(self, train_data, epochs, min_batch_size, learn_rate, test_data=None):
        '''
        Train the network using mini-batch stochastic gradient descent.

        :param train_data: list of (x, y) training pairs
        :param epochs: number of passes over the training data
        :param min_batch_size: samples per back-propagation update
        :param learn_rate: gradient-descent learning rate
        :param test_data: optional list of (x, label) pairs; when given,
            accuracy and the epoch cost are printed after each epoch
        '''
        if test_data:
            n_test = len(test_data)
        n = len(train_data)

        for j in range(epochs):
            self.step += 1
            self.costs.append(0.0)
            # Reshuffle each epoch so mini-batches differ between epochs.
            random.shuffle(train_data)
            min_batches = [train_data[k:k + min_batch_size]
                           for k in range(0, n, min_batch_size)]
            for min_batch in min_batches:
                self.update_mini_batch(min_batch, learn_rate)
            if test_data:
                print("Epoch {0}: {1} / {2},costs : {3}".format(
                    j, self.evaluate(test_data), n_test, self.costs[self.step]))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, learn_rate):
        '''
        Apply one gradient-descent step to weights and bias using
        back-propagation averaged over a single mini-batch.

        :param mini_batch: list of (x, y) training pairs
        :param learn_rate: gradient-descent learning rate
        '''
        nabla_b = [np.zeros(b.shape) for b in self.bias]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.back_prop(x, y)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # Average the gradient over the batch and step against it.
        self.weights = [w - (learn_rate / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.bias = [b - (learn_rate / len(mini_batch)) * nb
                     for b, nb in zip(self.bias, nabla_b)]

    def back_prop(self, x, y):
        '''
        Compute the cost gradient for a single sample via
        back-propagation.

        :param x: input column vector
        :param y: expected output column vector
        :return: tuple (nabla_b, nabla_w) of per-layer gradients, with
            the same list/array structure as self.bias and self.weights
        '''
        nabla_b = [np.zeros(b.shape) for b in self.bias]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # Forward pass, recording every layer's activation and its
        # pre-activation vector z (needed for the backward pass).
        activation = x
        activations = [x]
        zs = []
        for b, w in zip(self.bias, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmod(z)
            activations.append(activation)
        # Accumulate this sample's cost into the current epoch total.
        self.cost_cal(activations[-1], y)
        # Backward pass: output-layer error first.
        delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Propagate the error back through the hidden layers.
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return (nabla_b, nabla_w)

    def evaluate(self, test_data):
        '''
        Count how many test samples the network classifies correctly.

        :param test_data: list of (x, label) pairs where label is the
            expected index of the strongest output neuron
        :return: number of correct predictions
        '''
        test_results = [(np.argmax(self.feed_forward(x)), y) for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        '''
        Derivative of the quadratic cost with respect to the output
        activations.

        :param output_activations: network output
        :param y: expected output
        :return: element-wise difference (output - expected)
        '''
        return (output_activations - y)

    def cost_cal(self, y, label):
        '''
        Add one sample's squared-error cost to the current epoch total.

        :param y: network output (array-like)
        :param label: expected output (array-like)
        '''
        y = np.asarray(y)
        label = np.asarray(label)
        # np.sum collapses to a scalar, so self.costs holds plain
        # numbers (the old sum(map(...)) left length-1 arrays behind
        # when y/label were column vectors).
        self.costs[self.step] += float(np.sum((y - label) ** 2))



def sigmod(z):
    '''
    Logistic sigmoid activation: 1 / (1 + e^-z).

    :param z: scalar or numpy array
    :return: element-wise sigmoid of z, in (0, 1)
    '''
    exp_neg_z = np.exp(-z)
    return 1.0 / (1.0 + exp_neg_z)

def sigmoid_prime(z):
    '''
    Derivative of the sigmoid: sigmod(z) * (1 - sigmod(z)).

    :param z: scalar or numpy array
    :return: element-wise derivative of the sigmoid at z
    '''
    # Evaluate sigmod(z) once instead of twice; the product is
    # numerically identical to sigmod(z) * (1 - sigmod(z)).
    s = sigmod(z)
    return s * (1 - s)