#-*-coding:utf-8 -*-

import numpy as np

"""
非线性方程-逻辑函数
"""
# Hyperbolic tangent activation
def tanh(x):
    """Return the hyperbolic tangent of *x* (element-wise for arrays)."""
    return np.tanh(x)
# Derivative of the hyperbolic tangent
def tanh_deriv(x):
    """Return d/dx tanh(x) = 1 - tanh(x)^2.

    Evaluates np.tanh only once (the original computed it twice).
    """
    t = np.tanh(x)
    return 1.0 - t * t

# Logistic (sigmoid) activation
def logistic(x):
    """Return the logistic sigmoid 1 / (1 + e^-x), element-wise for arrays."""
    return 1.0 / (1.0 + np.exp(-x))
# Derivative of the logistic sigmoid
def logistic_deriv(x):
    """Return d/dx logistic(x) = s * (1 - s) where s = logistic(x).

    The sigmoid is evaluated once (the original called logistic() twice,
    doubling the exp() work).
    """
    s = 1.0 / (1.0 + np.exp(-x))  # logistic(x), inlined to evaluate once
    return s * (1.0 - s)


class NeuralNetwork:
    """A simple fully-connected feed-forward neural network trained by
    stochastic (one-example-at-a-time) back-propagation.

    Each non-output layer carries an extra always-1 bias unit, which is why
    the hidden weight matrices below have "+1" on both dimensions.
    """

    # --- activation helpers (static so the class is self-contained) ---

    @staticmethod
    def _tanh(x):
        """Hyperbolic tangent activation."""
        return np.tanh(x)

    @staticmethod
    def _tanh_deriv(x):
        """Derivative of tanh: 1 - tanh(x)^2."""
        t = np.tanh(x)
        return 1.0 - t * t

    @staticmethod
    def _logistic(x):
        """Logistic sigmoid activation."""
        return 1.0 / (1.0 + np.exp(-x))

    @staticmethod
    def _logistic_deriv(x):
        """Derivative of the logistic sigmoid: s * (1 - s)."""
        s = 1.0 / (1.0 + np.exp(-x))
        return s * (1.0 - s)

    def __init__(self, layers, activation='tanh'):
        """Build the network and randomly initialize its weights.

        layers -- list of layer sizes INCLUDING the input layer; e.g.
                  [5, 3, 2] means 5 input units, 3 hidden units, 2 outputs
        activation -- 'tanh' (default) or 'logistic'
        """
        # Select the nonlinearity and its derivative.
        if activation == 'logistic':
            self.activation = self._logistic
            self.activation_deriv = self._logistic_deriv
        else:
            self.activation = self._tanh
            self.activation_deriv = self._tanh_deriv

        # Initialize weights uniformly in [-0.25, 0.25).  The original drew
        # from [0, 1); all-positive weights push tanh toward saturation and
        # slow learning badly — small centered weights are the usual fix.
        self.weights = []
        # Hidden-layer weight matrices: (prev_size + bias) x (size + bias).
        for i in range(1, len(layers) - 1):
            self.weights.append(
                (2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
        # Output-layer weights: (last_hidden_size + bias) x output_size.
        self.weights.append(
            (2 * np.random.random((layers[-2] + 1, layers[-1])) - 1) * 0.25)

    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        """Train the network with stochastic back-propagation.

        X -- 2-D array-like; one training example per row, columns = features
        y -- target value(s), one entry per example
        learning_rate -- gradient-descent step size, typically in (0, 1]
        epochs -- number of single-example updates to perform
        """
        X = np.atleast_2d(X)
        # Append a constant-1 column to every example: the bias input.
        temp = np.ones([X.shape[0], X.shape[1] + 1])
        temp[:, 0:-1] = X
        X = temp
        y = np.array(y)

        for _ in range(epochs):
            i = np.random.randint(X.shape[0])  # pick one example at random
            a = [X[i]]

            # Forward pass: store every layer's activations for the backward
            # pass below.
            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))

            # Output-layer error term: (target - output) * f'(output).
            deltas = [(y[i] - a[-1]) * self.activation_deriv(a[-1])]

            # Back-propagate the error from the top layer down through the
            # hidden layers, then restore forward order.
            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T)
                              * self.activation_deriv(a[l]))
            deltas.reverse()

            # Gradient-descent update.  A distinct index name (the original
            # reused `i`, clobbering the chosen example index).
            for j in range(len(self.weights)):
                layer = np.atleast_2d(a[j])
                delta = np.atleast_2d(deltas[j])
                self.weights[j] += learning_rate * layer.T.dot(delta)

    def predict(self, x):
        """Return the network's output for a single example *x* (1-D).

        (The original also printed the result; that debug print is removed.)
        """
        x = np.array(x)
        # Append the constant-1 bias input, mirroring fit().
        a = np.ones(x.shape[0] + 1)
        a[0:-1] = x
        for l in range(len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a



