import numpy as np

def sigmoid(z):
    """Element-wise logistic activation: 1 / (1 + e^(-z))."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
    
def de_sigmoid(z, h):
    """Derivative of the sigmoid, expressed via its output h = sigmoid(z)."""
    one_minus_h = 1 - h
    return h * one_minus_h
    
    
def relu(z):
    """Rectified linear unit: clamps negative values to zero."""
    return np.clip(z, 0, None)
    
def de_relu(z, h):
    """Derivative of ReLU with respect to the pre-activation z.

    Returns 1 where z > 0 and 0 elsewhere (the subgradient at z == 0 is
    taken as 0, matching the original masking). Unlike the previous
    in-place version, this does NOT mutate the caller's z array — the
    stored pre-activations stay intact for any later use.
    """
    return (z > 0).astype(z.dtype)
 
    
def no_active(z):
    """Identity activation: passes z through unchanged."""
    return z

def de_no_active(z, h):
    """Derivative of the identity activation: all ones, shaped like h."""
    return np.ones(np.shape(h))
    
# o Nxc
# lab Nxc    
# o   : (N, c) network outputs
# lab : (N, c) targets
def loss_L2(o, lab):
    """Half sum-of-squares loss: 0.5 * sum((lab - o)^2)."""
    err = o - lab
    return 0.5 * np.sum(err * err)
    
def de_loss_L2(o, lab):
    """Gradient of loss_L2 with respect to the output o."""
    return np.subtract(o, lab)


def loss_CE(o, lab):
    """Softmax cross-entropy loss, summed over all samples.

    o   : (N, c) raw scores (logits)
    lab : (N, c) one-hot labels

    Uses the max-shift log-softmax trick: subtracting the per-row max
    before exponentiating prevents overflow for large scores, and
    computing log-softmax directly avoids log(0) -> -inf / nan when a
    probability underflows to zero.
    """
    shifted = o - np.max(o, axis=1, keepdims=True)
    log_p = shifted - np.log(np.sum(np.exp(shifted), axis=1, keepdims=True))
    return np.sum(-lab * log_p)

def de_loss_CE(o, lab):
    """Gradient of softmax cross-entropy w.r.t. the raw scores: softmax(o) - lab.

    Shifts scores by the per-row max before exponentiating so exp()
    cannot overflow for large logits; the shift cancels in the softmax.
    """
    shifted = o - np.max(o, axis=1, keepdims=True)
    e = np.exp(shifted)
    p = e / np.sum(e, axis=1, keepdims=True)
    return p - lab


def bulid_net(dim_in, list_num_hidden, list_act_funs, list_de_act_funs):
    """Build a fully-connected network as a list of layer dicts.

    dim_in           : input feature dimension
    list_num_hidden  : output width of each layer, in order
    list_act_funs    : activation function per layer
    list_de_act_funs : activation derivative per layer

    Each layer dict holds "w", "b", "act_fun", "de_act_fun".
    """
    # Chain the dimensions: layer k maps sizes[k] -> sizes[k+1].
    sizes = [dim_in] + list(list_num_hidden)
    layers = []
    for k in range(len(list_num_hidden)):
        layers.append({
            # small Gaussian init, suited to ReLU activations
            "w": 0.01 * np.random.randn(sizes[k], sizes[k + 1]),
            "b": 0.1 * np.ones([1, sizes[k + 1]]),
            "act_fun": list_act_funs[k],
            "de_act_fun": list_de_act_funs[k],
        })
    return layers
    
    
# 返回每一层的输入
# 与最后一层的输出    
# Forward pass through all layers.
# Returns: the input fed to each layer, the pre-activation z of each
# layer, and the final layer's output h.
def feed_forward(datas, layers):
    layer_inputs = []
    pre_activations = []
    for idx, layer in enumerate(layers):
        # first layer consumes the raw data; later layers consume the
        # previous layer's activation
        x = datas if idx == 0 else h
        z = np.dot(x, layer["w"]) + layer["b"]
        h = layer["act_fun"](z)
        layer_inputs.append(x)
        pre_activations.append(z)
    return layer_inputs, pre_activations, h


# 进行参数更新更新    
# One gradient-descent step over all layers (backpropagation).
def updata_wb(datas, labs, layers, loss_fun, de_loss_fun, alpha=0.01):
    n_samples, n_features = np.shape(datas)
    n_layers = len(layers)

    # Forward pass: per-layer inputs, pre-activations, final output.
    layer_inputs, pre_activations, output = feed_forward(datas, layers)
    loss = loss_fun(output, labs)

    # Backward pass: walk the layers from last to first, accumulating
    # the error term (delta) for each one.
    grad_out = de_loss_fun(output, labs)
    deltas = [None] * n_layers
    for j in reversed(range(n_layers)):
        z = pre_activations[j]
        if j == n_layers - 1:
            # output layer: chain the loss gradient with the activation derivative
            deltas[j] = grad_out * layers[j]["de_act_fun"](z, output)
        else:
            # hidden layer: propagate the next layer's delta back through
            # its weights; layer j's activation is layer j+1's input
            h = layer_inputs[j + 1]
            back = np.dot(deltas[j + 1], layers[j + 1]["w"].T)
            deltas[j] = back * layers[j]["de_act_fun"](z, h)

    # Gradient-descent update of every layer's weights and biases.
    for j in range(n_layers):
        dw = np.dot(layer_inputs[j].T, deltas[j])
        db = np.sum(deltas[j], axis=0, keepdims=True)
        layers[j]["w"] = layers[j]["w"] - alpha * dw
        layers[j]["b"] = layers[j]["b"] - alpha * db

    return layers, loss
    
def test_accuracy(datas, labs_true, layers):
    """Return the classification error rate of the network on this dataset.

    datas     : (N, D) inputs
    labs_true : (N, c) one-hot ground-truth labels
    """
    _, _, scores = feed_forward(datas, layers)
    # predicted class = argmax of the network output; truth = position
    # of the 1 in the one-hot label
    predicted = np.argmax(scores, axis=1)
    actual = np.argmax(labs_true, axis=1)
    n_wrong = np.count_nonzero(predicted != actual)
    return n_wrong / np.shape(datas)[0]

        
        
    

    
         
          
    
    
    
    
    
    
    
    
    
    
    
    
        
        
        


    

