from collections import OrderedDict
import numpy as np
from layers import *
class TwoLayerNet:
    """Two-layer fully connected network: Affine -> ReLU -> Affine -> Softmax.

    Weights live in ``self.params`` and are shared by reference with the
    layer objects, so in-place optimizer updates to ``params`` take effect
    in the layers immediately.
    """

    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # NOTE: weight_init_std is kept only for backward compatibility with
        # existing callers — it is unused; Xavier initialization
        # (scale 1/sqrt(fan_in)) is applied instead.
        self.params = {}
        self.params['W1'] = np.random.randn(input_size, hidden_size) / np.sqrt(input_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = np.random.randn(hidden_size, output_size) / np.sqrt(hidden_size)
        self.params['b2'] = np.zeros(output_size)

        # Ordered pipeline of layers; insertion order defines the forward order.
        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
        self.last_layer = SoftmaxWithLoss()

    def predict(self, x):
        """Forward pass up to (not including) the softmax; returns raw scores.

        Iterates the ordered layer pipeline instead of hard-coding layer
        names, so this method stays in sync with ``self.layers`` (matching
        how ``gradient`` traverses the same dict in reverse).
        """
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        """Return the softmax cross-entropy loss for batch ``x`` vs labels ``t``."""
        y = self.predict(x)
        return self.last_layer.forward(y, t)

    def accuracy(self, x, t):
        """Return the fraction of samples whose argmax score matches ``t``."""
        y = np.argmax(self.predict(x), axis=1)
        if t.ndim != 1:
            # Labels are one-hot encoded: convert to class indices.
            t = np.argmax(t, axis=1)
        return np.sum(y == t) / float(x.shape[0])

    def gradient(self, x, t):
        """Backpropagation: return a dict of gradients for W1, b1, W2, b2.

        The forward pass (via ``loss``) must run first so each layer caches
        the intermediate values its ``backward`` needs.
        """
        self.loss(x, t)
        # Backward through the loss layer, then through the pipeline in reverse.
        d_out = self.last_layer.backward(1)
        for layer in reversed(list(self.layers.values())):
            d_out = layer.backward(d_out)
        grads = {
            'W1': self.layers['Affine1'].dW,
            'b1': self.layers['Affine1'].db,
            'W2': self.layers['Affine2'].dW,
            'b2': self.layers['Affine2'].db,
        }
        return grads





