import numpy as np
from collections import OrderedDict
from layers import *
class MultiLayerNetExtend:
    """Fully-connected multi-layer network with optional extensions.

    Supports weight decay (L2 regularization), Dropout, Batch Normalization,
    and He/Xavier weight initialization. Layer order per hidden layer is
    Affine -> [BatchNorm] -> Activation -> [Dropout], followed by a final
    Affine layer and a softmax-with-loss output layer.
    """

    def __init__(self, input_size, hidden_size_list, output_size,
                 activation='relu', weight_init_std='relu', weight_decay_lamba=0,
                 use_dropout=False, dropout_ration=0.5, use_batch_norm=False):
        """
        Args:
            input_size: number of input features.
            hidden_size_list: list of unit counts, one per hidden layer.
            output_size: number of output classes.
            activation: 'relu' or 'sigmoid'.
            weight_init_std: numeric std for Gaussian init, or 'relu'/'he'
                for He initialization, or 'sigmoid'/'xavier' for Xavier.
            weight_decay_lamba: L2 weight-decay strength (lambda).
            use_dropout: if True, insert a Dropout layer after each hidden
                activation.
            dropout_ration: dropout ratio passed to each Dropout layer.
            use_batch_norm: if True, insert BatchNormalization between each
                hidden Affine layer and its activation.
        """
        self.input_size = input_size
        self.hidden_size_list = hidden_size_list
        self.output_size = output_size
        self.hidden__layer_num = len(hidden_size_list)

        self.use_dropout = use_dropout
        self.weight_decay_lamba = weight_decay_lamba
        self.use_batch_norm = use_batch_norm
        self.params = {}
        # Initialize weights W1..Wn and biases b1..bn.
        self.__init_weight(weight_init_std)

        # Build the layer pipeline.
        activation_layer = {'sigmoid': Sigmoid, 'relu': Relu}
        self.layers = OrderedDict()
        for idx in range(1, self.hidden__layer_num + 1):
            # Affine (fully-connected) transform for this hidden layer.
            self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)],
                                                      self.params['b' + str(idx)])
            # Optional BatchNorm, inserted between Affine and the activation.
            if self.use_batch_norm:
                # gamma/beta are the learnable scale/shift of BatchNorm.
                self.params['gamma' + str(idx)] = np.ones(hidden_size_list[idx - 1])
                self.params['beta' + str(idx)] = np.zeros(hidden_size_list[idx - 1])
                self.layers['BatchNorm' + str(idx)] = BatchNormalization(
                    self.params['gamma' + str(idx)], self.params['beta' + str(idx)])
            # Activation layer.
            self.layers['Activation_function' + str(idx)] = activation_layer[activation]()
            # FIX: the Dropout layer was accepted in the signature but never
            # created. predict() already dispatches on "Dropout" key names,
            # so simply registering the layer completes the feature.
            if self.use_dropout:
                self.layers['Dropout' + str(idx)] = Dropout(dropout_ration)

        # Final Affine layer feeding the softmax-with-loss output layer.
        idx = self.hidden__layer_num + 1
        self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)],
                                                  self.params['b' + str(idx)])
        self.last_layer = SoftmaxWithLoss()

    def __init_weight(self, weight_init_std):
        """Initialize W/b for every layer with the requested scaling.

        weight_init_std selects the standard deviation of the Gaussian init:
        'relu' or 'he' uses the He value sqrt(2/n_prev); 'sigmoid' or
        'xavier' uses the Xavier value sqrt(1/n_prev); any numeric value is
        used directly as the std.
        """
        # Concatenate to get the size of every layer, input through output.
        all_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]
        for idx in range(1, len(all_size_list)):
            scale = weight_init_std
            if str(weight_init_std).lower() in ('relu', 'he'):
                scale = np.sqrt(2.0 / all_size_list[idx - 1])
            elif str(weight_init_std).lower() in ('sigmoid', 'xavier'):
                scale = np.sqrt(1.0 / all_size_list[idx - 1])
            self.params['W' + str(idx)] = scale * np.random.randn(all_size_list[idx - 1],
                                                                  all_size_list[idx])
            self.params['b' + str(idx)] = np.zeros(all_size_list[idx])

    def predict(self, x, train_flg=False):
        """Forward pass through all layers (excluding the loss layer).

        BatchNorm and Dropout layers behave differently in training vs.
        inference, so they receive train_flg; all other layers do not.
        """
        for key, layer in self.layers.items():
            if "Dropout" in key or "BatchNorm" in key:
                x = layer.forward(x, train_flg)
            else:
                x = layer.forward(x)
        return x

    def loss(self, x, t, train_flg=False):
        """Return the loss for input x and labels t, including L2 penalty."""
        y = self.predict(x, train_flg)
        # Weight decay (L2 regularization): 0.5 * lambda * sum(W**2) per
        # layer, accumulated over every Affine weight matrix.
        weight_decay = 0
        for idx in range(1, self.hidden__layer_num + 2):
            W = self.params['W' + str(idx)]
            weight_decay += 0.5 * self.weight_decay_lamba * np.sum(W ** 2)
        # Add the penalty so large weights are discouraged.
        return self.last_layer.forward(y, t) + weight_decay

    def accuracy(self, X, T):
        """Return classification accuracy of the network on (X, T)."""
        # FIX: evaluation must run in inference mode (train_flg=False);
        # train-mode Dropout/BatchNorm would randomize the measurement.
        Y = self.predict(X, train_flg=False)
        Y = np.argmax(Y, axis=1)
        if T.ndim != 1:
            T = np.argmax(T, axis=1)  # convert one-hot labels to class indices
        accuracy = np.sum(Y == T) / float(X.shape[0])
        return accuracy

    def gradient(self, x, t):
        """Backpropagation: return gradients for every parameter in params."""
        # Forward pass (training mode) to populate layer caches.
        self.loss(x, t, train_flg=True)
        # Backward pass in reverse layer order.
        d_out = 1
        d_out = self.last_layer.backward(d_out)
        layers = list(self.layers.values())
        layers.reverse()
        for layer in layers:
            d_out = layer.backward(d_out)

        grads = {}
        for idx in range(1, self.hidden__layer_num + 2):
            # Weight-decay gradient (lambda * W) is added to the Affine dW.
            grads['W' + str(idx)] = (self.layers['Affine' + str(idx)].dW
                                     + self.weight_decay_lamba * self.params['W' + str(idx)])
            grads['b' + str(idx)] = self.layers['Affine' + str(idx)].db
            # BatchNorm gamma/beta gradients exist only for hidden layers.
            if self.use_batch_norm and idx != self.hidden__layer_num + 1:
                grads['gamma' + str(idx)] = self.layers['BatchNorm' + str(idx)].d_gamma
                grads['beta' + str(idx)] = self.layers['BatchNorm' + str(idx)].d_beta

        return grads