import numpy as np
import sys,os
sys.path.append(os.pardir)
from common.gradient import numerical_gradient
from common.functions import softmax,cross_entropy_error
# def softmax(a):
#     c = np.max(a)
#     exp_a = np.exp(a - c)
#     sum_exp_a = np.sum(exp_a)
#     y = exp_a / sum_exp_a
#     return y

# def cross_entropy_error(y,t): # 实现对应 mini-batch 的交叉熵误差
#     if y.ndim == 1:
#         t = t.reshape(1,t.size) # reshape to a 1*t.size array
#         y = y.reshape(1,y.size) # 转化为1*y.size大小的数组
#     batch_size = y.shape[0]
#     return -np.sum(t*np.log(y+1e-7))/batch_size

class simpleNet:
    """Minimal one-layer network: an affine transform (x @ W) whose
    output is scored with softmax + cross-entropy loss.

    Attributes:
        W: 2x3 weight matrix, drawn from a standard Gaussian.
    """

    def __init__(self):
        # Random Gaussian initialization of the weights.
        self.W = np.random.randn(2, 3)

    def predict(self, x):
        """Return the raw scores (logits) for input vector ``x``."""
        return np.dot(x, self.W)

    def loss(self, x, t):
        """Return the cross-entropy loss of the prediction for ``x``
        against the one-hot supervision label ``t``."""
        scores = self.predict(x)
        probs = softmax(scores)
        return cross_entropy_error(probs, t)

net = simpleNet()
x = np.array([0.6, 0.9])   # input sample
t = np.array([0, 0, 1])    # one-hot correct label

p = net.predict(x)
# print(net.W)           # the random weight matrix
# print(p)               # raw scores
# print(np.argmax(p))    # index of the highest score
# print(net.loss(x, t))  # loss value


def f(w):
    """Loss as a function of the weights — the signature required by
    numerical_gradient. ``w`` is unused because net.loss reads net.W
    directly; numerical_gradient perturbs net.W in place."""
    return net.loss(x, t)


# Gradient of the loss with respect to net.W, via central differences.
dW = numerical_gradient(f, net.W)
print(dW)
