import numpy as np
def softmax(x):
    """Numerically stable softmax: shift by the max before exponentiating.

    Subtracting max(x) leaves the result unchanged mathematically but
    prevents overflow in np.exp for large inputs.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / shifted.sum()
def cross_entropy_error(y, t):
    """Cross-entropy loss -sum(t * log(y + delta)).

    Args:
        y: predicted probability distribution (e.g. softmax output).
        t: target distribution, typically a one-hot vector.

    A small epsilon is added inside the log so that log(0) never
    produces -inf.  NOTE(fix): the original used ``10e-7`` (= 1e-6),
    a typo for the conventional ``1e-7``.
    """
    delta = 1e-7
    return -np.sum(t * np.log(y + delta))
def numerical_gradient(f, x):
    """Central-difference numerical gradient of ``f`` at ``x``.

    Visits every element of ``x`` (any shape), perturbs it by +/-h,
    and estimates the partial derivative (f(x+h) - f(x-h)) / (2h).
    ``x`` is modified in place during iteration but fully restored
    before returning.

    Fixes vs. the original: removed the debug ``print`` calls, and made
    the -h branch use the same float() conversion as the +h branch.

    Args:
        f: scalar-valued function of the whole array ``x``.
        x: numpy array at which to evaluate the gradient.

    Returns:
        Array of the same shape as ``x`` holding the gradient estimates.
    """
    h = 1e-4  # step size for the central difference
    grad = np.zeros_like(x)

    # multi_index yields each element's index; readwrite allows in-place
    # perturbation of x (nditer is read-only by default).
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:  # finished is True once all elements are visited
        idx = it.multi_index
        tmp_val = x[idx]

        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x + h)

        x[idx] = float(tmp_val) - h
        fxh2 = f(x)  # f(x - h)

        grad[idx] = (fxh1 - fxh2) / (2 * h)

        x[idx] = tmp_val  # restore the original value
        it.iternext()     # advance the iterator

    return grad

class SimpleNet:
    """Tiny one-layer network: a single 2x3 weight matrix, no bias."""

    def __init__(self):
        # Gaussian random initialization of the weights.
        self.W = np.random.randn(2, 3)

    def predict(self, x):
        """Linear scores for input x (no activation on the output layer)."""
        return np.dot(x, self.W)

    def loss(self, x, t):
        """Cross-entropy loss of softmax(predict(x)) against target t."""
        scores = self.predict(x)
        probs = softmax(scores)
        return cross_entropy_error(probs, t)
         
if __name__ == '__main__':
    # Demo: run one forward pass, build a one-hot target from the
    # predicted class, and compute the numerical gradient of the loss.
    net = SimpleNet()
    print("--------W的值----------")
    print(net.W)

    x = np.array([0.6, 0.9])
    p = net.predict(x)
    print("--------最后一层无需激活函数-------")
    print(p)

    print("--------one-hot编码-------")
    index = int(np.argmax(p))  # index of the highest score
    print(index)
    # One-hot encode the predicted class with a single vectorized
    # assignment (replaces the original element-by-element loop;
    # produces the identical array).
    t = np.zeros(3)
    t[index] = 1
    print(t)

    print("------损失函数的值-------")
    print(net.loss(x, t))

    def f(W):
        # W is intentionally unused: net.loss reads net.W directly; the
        # parameter only satisfies numerical_gradient's f(x) convention.
        return net.loss(x, t)

    dW = numerical_gradient(f, net.W)
    print("------dW-------")
    print(dW)