import numpy as np
import sys
sys.path.append("/home/cyq610664915/deeplearning/data")
from dataset.mnist import load_mnist
def softmax(x):
    """Numerically stable softmax.

    Accepts a 1-D score vector or a 2-D batch (one sample per row).
    The original summed over the whole array, which silently produced
    wrong probabilities for batch input; the 2-D branch normalizes
    each row independently. Subtracting the per-row max before exp
    prevents overflow without changing the result.
    """
    if x.ndim == 2:
        shifted = x - np.max(x, axis=1, keepdims=True)
        e = np.exp(shifted)
        return e / np.sum(e, axis=1, keepdims=True)
    a = np.max(x)  # shift for numerical stability
    return np.exp(x - a) / np.sum(np.exp(x - a))
def sigmoid(x):
    """Elementwise logistic sigmoid: 1 / (1 + exp(-x)).

    Fixes an operator-precedence bug: the original `1/1+np.exp(-x)`
    evaluated as `(1/1) + exp(-x)` = `1 + exp(-x)`, which is not a
    sigmoid at all (range (1, inf) instead of (0, 1)).
    """
    return 1 / (1 + np.exp(-x))
def cross_entropy_error(y, t):
    """Mean cross-entropy of predictions y against one-hot targets t.

    y : probabilities, shape (classes,) or (batch, classes)
    t : one-hot labels, same shape as y

    Two fixes over the bare `-sum(t*log(y))`:
    - a tiny epsilon keeps log() finite when a predicted probability
      is exactly 0 (otherwise the loss becomes inf/nan);
    - dividing by the batch size makes the loss (and hence the
      gradient magnitude) independent of batch_size.
    """
    if y.ndim == 1:
        # promote a single sample to a batch of one
        y = y.reshape(1, -1)
        t = t.reshape(1, -1)
    batch_size = y.shape[0]
    return -np.sum(t * np.log(y + 1e-7)) / batch_size
def numerical_gradient(f, x):
    """Estimate the gradient of f at x by central differences.

    Each element of x is perturbed by +/-eps in place so f can be
    re-evaluated, then restored, so x is unchanged on return. Works
    for arrays of any shape.
    """
    eps = 1e-4  # step size for the finite difference
    grad = np.zeros_like(x)

    # nditer visits every element of an arbitrarily-shaped array;
    # op_flags=['readwrite'] allows the temporary in-place perturbation.
    walker = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not walker.finished:
        pos = walker.multi_index
        saved = x[pos]

        x[pos] = saved + eps
        f_plus = f(x)

        x[pos] = saved - eps
        f_minus = f(x)

        grad[pos] = (f_plus - f_minus) / (2 * eps)

        x[pos] = saved  # restore before moving to the next element
        walker.iternext()

    return grad
#x: (num_samples, input_size); W1: (input_size, hidden_size); W2: (hidden_size, output_size)
class TwoLayerNet:
    """Fully connected 2-layer net: input -> sigmoid hidden -> softmax output.

    Parameters live in self.params: W1/b1 for the hidden layer,
    W2/b2 for the output layer. Weights are Gaussian-initialized and
    scaled by weight_init_std; biases start at zero.
    """

    def __init__(self, input_size, hidden_size, output_size,
                 weight_init_std=0.01):
        self.params = {
            "W1": weight_init_std * np.random.randn(input_size, hidden_size),
            "b1": np.zeros(hidden_size),
            "W2": weight_init_std * np.random.randn(hidden_size, output_size),
            "b2": np.zeros(output_size),
        }

    def predict(self, x):
        """Forward pass: return softmax class probabilities for x."""
        p = self.params
        hidden = sigmoid(np.dot(x, p["W1"]) + p["b1"])
        scores = np.dot(hidden, p["W2"]) + p["b2"]
        return softmax(scores)

    def loss(self, x, t):
        """Cross-entropy loss of the predictions against one-hot targets t."""
        return cross_entropy_error(self.predict(x), t)

    def accuracy(self, x, t):
        """Fraction of samples whose predicted class matches the label."""
        predicted = np.argmax(self.predict(x), axis=1)  # argmax along each row
        actual = np.argmax(t, axis=1)
        return np.sum(predicted == actual) / float(x.shape[0])

    def numerical_gradient(self, x, t):
        """Finite-difference gradient of the loss w.r.t. every parameter.

        The lambda ignores its argument on purpose: the module-level
        numerical_gradient() perturbs each parameter array in place, so
        re-evaluating self.loss sees the nudged values.
        """
        loss_fn = lambda _w: self.loss(x, t)
        return {name: numerical_gradient(loss_fn, self.params[name])
                for name in ("W1", "b1", "W2", "b2")}
# normalize=True scales pixel values to [0, 1]; one_hot_label=True encodes labels as one-hot rows.
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
train_loss_list = []

# x_train has shape (60000, 784): 784 flattened pixel values per image.
iters_num = 10000          # number of gradient-descent iterations
train_size = x_train.shape[0]
batch_size = 100           # mini-batch size
learning_rate = 0.1        # fixed SGD step size (typo `learing_rate` fixed)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
for i in range(iters_num):
    # Sample a mini-batch. NOTE: np.random.choice samples WITH
    # replacement by default, so a batch may contain duplicates.
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Numerical differentiation is extremely slow (one full forward pass
    # per parameter element); fine for study, use backprop for real runs.
    grad = network.numerical_gradient(x_batch, t_batch)
    for key in ("W1", "b1", "W2", "b2"):
        network.params[key] -= learning_rate * grad[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)
    # was `print("")`, which reported nothing about training progress
    print(f"iter {i}: loss {loss}")
    