import numpy as np
def softmax(x):
    """Numerically stable softmax.

    Supports a 1-D score vector or a 2-D batch (one sample per row).

    Bug fix: the original subtracted the *global* max and divided by the
    *global* sum, so for a 2-D batch the individual rows did not sum to 1
    (the whole matrix did). Normalize per row when x is 2-D.
    """
    if x.ndim == 2:
        # Per-row max/sum so each sample is normalized independently.
        shifted = x - np.max(x, axis=1, keepdims=True)
        exps = np.exp(shifted)
        return exps / np.sum(exps, axis=1, keepdims=True)
    # 1-D case: subtracting the max avoids overflow in exp.
    exps = np.exp(x - np.max(x))
    return exps / np.sum(exps)
def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^-x).

    Bug fix: the original `1/1+np.exp(-x)` parses as `1 + exp(-x)` due to
    operator precedence, which is not the sigmoid at all (e.g. it returned
    2.0 at x=0 instead of 0.5). Parenthesize the denominator.
    """
    return 1 / (1 + np.exp(-x))
def cross_entropy_error(y, t):
    """Cross-entropy -sum(t * log(y)) between predictions y and targets t.

    t is expected to be one-hot (or soft) labels with the same shape as y.

    Bug fix: `np.log(y)` is -inf when any probability is exactly 0, which
    poisons the sum with NaN (0 * -inf). Add a tiny epsilon before the log,
    as is standard for numerical cross-entropy.

    NOTE(review): this returns the *summed* loss over the whole batch; many
    formulations divide by the batch size — confirm against callers before
    changing that.
    """
    eps = 1e-7  # guards against log(0)
    return -np.sum(t * np.log(y + eps))
def numerical_gradient(f, x):
    """Central-difference gradient of f at x; same shape as x.

    Temporarily perturbs each element of x in place by +/-eps, evaluates f,
    and restores the original value before moving on.
    """
    eps = 1e-4  # step size for the finite difference
    grad = np.zeros_like(x)
    # np.ndindex walks every multi-index of x in C order.
    for idx in np.ndindex(x.shape):
        saved = x[idx]
        x[idx] = saved + eps
        f_plus = f(x)   # f(x + eps) at this coordinate
        x[idx] = saved - eps
        f_minus = f(x)  # f(x - eps) at this coordinate
        grad[idx] = (f_plus - f_minus) / (2 * eps)
        x[idx] = saved  # restore the original value
    return grad
# Shapes: x is (n_samples, input_size); W1 is (input_size, hidden_size); W2 is (hidden_size, output_size)
class TwoLayerNet:
    """A fully connected two-layer net: affine -> sigmoid -> affine -> softmax."""

    def __init__(self, input_size, hidden_size, output_size,
                 weight_init_std=0.01):
        # Weights start as small Gaussian noise; biases start at zero.
        self.params = {
            "W1": weight_init_std * np.random.randn(input_size, hidden_size),
            "b1": np.zeros(hidden_size),
            "W2": weight_init_std * np.random.randn(hidden_size, output_size),
            "b2": np.zeros(output_size),
        }

    def predict(self, x):
        """Forward pass; returns the softmax output for input batch x."""
        p = self.params
        hidden = sigmoid(np.dot(x, p["W1"]) + p["b1"])
        scores = np.dot(hidden, p["W2"]) + p["b2"]
        # The output (and hence the loss) is a function of W1, b1, W2, b2.
        return softmax(scores)

    def loss(self, x, t):
        """Cross-entropy loss of the prediction on x against targets t."""
        return cross_entropy_error(self.predict(x), t)

    def accuracy(self, x, t):
        """Fraction of samples whose predicted class matches the target class."""
        predicted = np.argmax(self.predict(x), axis=1)  # argmax along each row
        expected = np.argmax(t, axis=1)
        return np.sum(predicted == expected) / float(x.shape[0])

    def numerical_gradient(self, x, t):
        """Numerical gradient of the loss w.r.t. each parameter array.

        The lambda ignores its argument: numerical_gradient perturbs the
        parameter arrays in place, and self.loss re-reads them each call.
        """
        loss_fn = lambda _w: self.loss(x, t)
        return {
            key: numerical_gradient(loss_fn, self.params[key])
            for key in ("W1", "b1", "W2", "b2")
        }
# Smoke test: build a 784-100-10 net, run a forward pass on random input,
# turn the predictions into one-hot targets, and print numerical gradients.
net = TwoLayerNet(input_size=784, hidden_size=100, output_size=10)
for key in ("W1", "b1", "W2", "b2"):
    print(net.params[key].shape)
x = np.random.rand(100, 784)
y = net.predict(x)
print(y)
# One-hot encode each sample's predicted class, e.g. for a 2x2 array,
# t[[0, 1], [0, 1]] = 1 sets the diagonal to 1.
t = np.zeros_like(y)
t[np.arange(y.shape[0]), np.argmax(y, axis=1)] = 1
print(t)
# NOTE: numerically differentiating ~80k parameters is very slow.
grads = net.numerical_gradient(x, t)
print("------------------W1------------------")
print(grads["W1"])
print("------------------b1------------------")
print(grads["b1"])
print("------------------W2------------------")
print(grads["W2"])
print("------------------b2------------------")
print(grads["b2"])