import numpy as np
from collections import OrderedDict
from layers import *

class MultiLayerNet:
    """Fully connected multi-layer neural network.

    Architecture: input -> (Affine -> activation) * hidden_layer_num
    -> Affine -> SoftmaxWithLoss, with optional L2 weight decay.

    Parameters
    ----------
    input_size : int
        Number of input neurons.
    hidden_size_list : list[int]
        Neuron counts of the hidden layers, e.g. [100, 100, 100].
    output_size : int
        Number of output neurons.
    activation : str
        'relu' or 'sigmoid' -- activation applied after each hidden Affine.
    weight_init_std : float or str
        Standard deviation for the initial weights. The strings
        'relu'/'he' select He initialization, 'sigmoid'/'xavier'
        select Xavier initialization; a number is used directly.
    weight_decay_lambda : float
        Strength of the L2 regularization (0 disables it).
    """

    def __init__(self, input_size, hidden_size_list, output_size,
                 activation='relu', weight_init_std='relu',
                 weight_decay_lambda=0):
        self.input_size = input_size                  # size of the input layer
        self.output_size = output_size                # size of the output layer
        self.hidden_size_list = hidden_size_list      # sizes of the hidden layers
        self.hidden_layer_num = len(hidden_size_list) # number of hidden layers
        self.weight_decay_lambda = weight_decay_lambda  # L2 penalty strength

        # Initialize every layer's weights and biases into self.params.
        self.params = {}
        self.__init_weight(weight_init_std)

        # Build the layers in forward order; OrderedDict preserves that order
        # for predict() (forward) and gradient() (reverse).
        activation_layer = {'sigmoid': Sigmoid, 'relu': Relu}
        self.layers = OrderedDict()
        for idx in range(1, self.hidden_layer_num + 1):
            self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)],
                                                      self.params['b' + str(idx)])
            self.layers['Activation_function' + str(idx)] = activation_layer[activation]()
        # Final Affine layer feeding the softmax-with-loss output layer.
        idx = self.hidden_layer_num + 1
        self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)],
                                                  self.params['b' + str(idx)])
        self.last_layer = SoftmaxWithLoss()

    def __init_weight(self, weight_init_std):
        """Fill self.params with initial 'W1'..'Wn' / 'b1'..'bn'.

        'relu' / 'he'       -> scale = sqrt(2 / n)  (He initialization)
        'sigmoid' / 'xavier'-> scale = sqrt(1 / n)  (Xavier initialization)
        numeric value       -> used directly as the standard deviation
        where n is the fan-in (neuron count of the previous layer).

        Raises
        ------
        ValueError
            If weight_init_std is a string other than the keywords above.
        """
        # Neuron counts of every layer, input and output included.
        all_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]
        for idx in range(1, len(all_size_list)):
            if str(weight_init_std).lower() in ('relu', 'he'):
                scale = np.sqrt(2.0 / all_size_list[idx - 1])  # He: sqrt(2/n)
            elif str(weight_init_std).lower() in ('sigmoid', 'xavier'):
                scale = np.sqrt(1.0 / all_size_list[idx - 1])  # Xavier: sqrt(1/n)
            elif isinstance(weight_init_std, str):
                # Fail fast on an unknown keyword instead of letting the
                # multiplication below raise a confusing TypeError.
                raise ValueError('unknown weight_init_std: ' + weight_init_std)
            else:
                scale = weight_init_std  # explicit numeric std deviation
            self.params['W' + str(idx)] = scale * np.random.randn(all_size_list[idx - 1],
                                                                  all_size_list[idx])
            self.params['b' + str(idx)] = np.zeros(all_size_list[idx])

    def predict(self, x):
        """Forward pass through every layer except the loss layer; returns raw scores."""
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        """Loss for input x and labels t, including the L2 weight-decay penalty."""
        y = self.predict(x)
        # L2 weight decay: 0.5 * lambda * sum(W**2) over every Affine layer.
        # The +2 makes range cover indices 1..hidden_layer_num+1, i.e. the
        # hidden Affine layers plus the final output Affine (the input layer
        # has no weights).
        weight_decay = 0
        for idx in range(1, self.hidden_layer_num + 2):
            W = self.params['W' + str(idx)]
            weight_decay += 0.5 * self.weight_decay_lambda * np.sum(W ** 2)
        return self.last_layer.forward(y, t) + weight_decay

    def gradient(self, x, t):
        """Backpropagation: gradients of the loss w.r.t. every W and b.

        Returns a dict with the same keys as self.params
        ('W1'..'b1'.. up to the output Affine layer).
        """
        # Forward pass; caches the intermediate values inside each layer.
        self.loss(x, t)
        # Backward pass through the layers in reverse order.
        d_out = self.last_layer.backward(1)
        for layer in reversed(list(self.layers.values())):
            d_out = layer.backward(d_out)
        # Collect the gradients; L2 weight decay contributes lambda * W.
        grads = {}
        for idx in range(1, self.hidden_layer_num + 2):
            affine = self.layers['Affine' + str(idx)]
            grads['W' + str(idx)] = affine.dW + self.weight_decay_lambda * affine.W
            grads['b' + str(idx)] = affine.db
        return grads