import numpy as np
from 深度学习入门基于python的理论与实践.dataset.mnist import load_mnist
def mean_squared_error(y, t):
    """Return half the sum of squared differences between prediction y and target t."""
    diff = y - t
    return 0.5 * np.sum(diff ** 2)

def cross_entropy_error(y, t):
    """Cross-entropy between a one-hot target t and predicted probabilities y."""
    # The small offset keeps log() away from log(0), which is undefined
    # and would break the computation.
    delta = 1e-7
    shifted = np.log(y + delta)
    return -np.sum(shifted * t)

def cross_entropy_error_onehot_vec(y, t):
    """Mini-batch cross-entropy error for one-hot encoded labels.

    Accepts a single sample (1-D ``y``) or a batch (2-D ``y``, one row per
    sample) and returns the cross-entropy averaged over the batch.
    Fix: removed a leftover debug ``print(y.shape)``.
    """
    if y.ndim == 1:
        # Promote a single sample to a batch of size 1 so the same
        # batch-averaging code path handles both cases.
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]
    # 1e-7 keeps log() away from the undefined log(0).
    return -np.sum(t * np.log(y + 1e-7)) / batch_size

def cross_entropy_error_vec(y, t):
    """Mini-batch cross-entropy error when t holds integer class labels (not one-hot)."""
    if y.ndim == 1:
        # Treat a single sample as a batch of size 1.
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    n = y.shape[0]
    # Pick out each sample's predicted probability for its true class;
    # the 1e-7 offset avoids log(0).
    picked = y[np.arange(n), t]
    return -np.sum(np.log(picked + 1e-7)) / n


#导数
def numerical_diff(f, x):
    """Central-difference approximation of the derivative of f at x."""
    h = 1e-4  # 0.0001: small enough for accuracy, large enough to avoid rounding error
    forward = f(x + h)
    backward = f(x - h)
    return (forward - backward) / (2 * h)

def function_1(x):
    """Example function: 0.01*x^2 + 0.1*x."""
    quadratic_term = 0.01 * x ** 2
    linear_term = 0.1 * x
    return quadratic_term + linear_term

def function_0(x):
    """Example function: the square of x."""
    squared = x ** 2
    return squared

def function_2(x):
    """Example function: the sum of squares of the elements of x."""
    squares = x ** 2
    return np.sum(squares)

#偏导数
def numerical_gradient(f, x):
    """Numerically compute the gradient of f at x via central differences.

    Fixes over the original:
    - works for arrays of any rank (the old flat ``range(x.size)`` indexing
      only handled 1-D arrays);
    - the gradient array is always float, so integer-dtype inputs no longer
      silently truncate the result.

    ``x`` is perturbed in place element by element but restored afterwards.
    """
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x, dtype=float)  # force float; zeros_like(int) would truncate
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = tmp_val + h
        fxh1 = f(x)
        x[idx] = tmp_val - h
        fxh2 = f(x)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the original value
        it.iternext()
    return grad

#梯度下降
def gradient_descent(f, init_x, lr=0.01, step_num=100):
    """Minimize f by plain gradient descent starting from init_x.

    Parameters:
        f: scalar-valued function of an ndarray.
        init_x: starting point; copied, so the caller's array is no longer
            mutated (the original version modified init_x in place via
            ``x -= lr * grad``).
        lr: learning rate.
        step_num: number of update steps.

    Returns the final point after ``step_num`` updates.
    """
    x = np.array(init_x, dtype=float)  # copy to avoid clobbering the caller's array
    for _ in range(step_num):
        grad = numerical_gradient(f, x)
        x -= lr * grad
    return x


if __name__ == '__main__':
    # --- loss functions on a single sample ---
    pred = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
    label = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
    print(mean_squared_error(pred, label))  # 0.09750000000000003

    pred_wrong = np.array([0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0])
    print(mean_squared_error(pred_wrong, label))  # 0.5975

    print(cross_entropy_error(pred, label))        # 0.510825457099338
    print(cross_entropy_error(pred_wrong, label))  # 2.302584092994546

    print(cross_entropy_error_onehot_vec(pred, label))

    # --- MNIST data and mini-batch sampling ---
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
    print(x_train.shape)
    print(t_train.shape)

    # Draw a random mini-batch from the training set.
    train_size = x_train.shape[0]
    batch_size = 10
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # --- numerical differentiation ---
    print(numerical_diff(function_1, 10))
    print(numerical_diff(function_0, 2))

    # --- numerical gradient ---
    print(numerical_gradient(function_2, np.array([3.0, 4.0])))

    # --- gradient descent ---
    start = np.array([-3.0, 4.0])
    result = gradient_descent(function_2, init_x=start, lr=0.1, step_num=100)
    print(result)