# Derivative computation (numerical differentiation examples)
# DATE: 2022/3/22
# AUTHOR: Cheng Ze WUST
import numpy as np
import matplotlib.pyplot as plt

#region Derivative of a closed-form function
def f(x):
    """Quadratic test function: f(x) = x^2 + 2."""
    return x ** 2 + 2

# Sample f on [0, 20) with step 0.1 (used by the disabled plot below).
x = np.arange(0.0, 20.0, 0.1)
y = f(x)
# Plotting was left disabled in the original script:
# plt.xlabel('x')
# plt.ylabel('f(x)')
# plt.plot(x,y)
# plt.grid()
# plt.show()

def derivative1(f, x):
    """Numerically differentiate *f* at point *x*.

    Uses the central difference (f(x+h) - f(x-h)) / 2h, which has O(h^2)
    truncation error. The original forward difference (f(x+h) - f(x)) / h
    is only O(h) accurate, and the central form also matches the scheme
    used by numerical_gradient later in this file.
    """
    h = 1e-4
    return (f(x + h) - f(x - h)) / (2 * h)
# Analytic check: d/dx (x^2 + 2) = 2x, so this should print ~20 at x = 10.
print(derivative1(f,10))
#endregion

#region Partial derivatives
def fn(x):
    """f(x0, x1) = x0^2 + x1^2, with x given as a length-2 sequence."""
    x0, x1 = x[0], x[1]
    return x0 ** 2 + x1 ** 2

# Partial derivative w.r.t. x0 at the point (x0=5, x1=9):
# freeze x1 at 9 and differentiate in x0 only.
def f1(x0):
    """fn with x1 held fixed at 9: returns x0^2 + 81."""
    return x0 * x0 + 81

# The partial derivative of fn w.r.t. x0 at (5, 9) is 2*x0 = 10.
print(derivative1(f1,5))
#endregion

#region Gradient
def numerical_gradient(f, x):
    """Central-difference gradient of *f* at the point *x*.

    Improvements over the original:
      * accepts arrays of any shape — the flat `range(x.size)` loop with
        `x[i]` indexing only worked correctly on 1-D arrays;
      * casts integer input to float, so neither the +/-h perturbation
        nor the gradient values are silently truncated by int storage.
    Behavior for 1-D float arrays is unchanged.
    """
    h = 1e-4
    x = np.asarray(x, dtype=float)   # no copy when x is already a float array
    grad = np.zeros_like(x)          # same shape as x
    for idx in np.ndindex(x.shape):
        tmp_value = x[idx]
        # Evaluate f at x +/- h along this single coordinate.
        x[idx] = tmp_value + h
        fxh1 = f(x)
        x[idx] = tmp_value - h
        fxh2 = f(x)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_value           # restore the coordinate
    return grad

# Gradient of x0^2 + x1^2 at (3, 4); expect approximately [6, 8].
print(numerical_gradient(fn,np.array([3.0,4.0])))
#endregion

#region Gradient descent
def gradient_descent(f, init_x, lr=0.01, step_num=100):
    """Minimize *f* by plain gradient descent.

    f        : callable taking a numpy array and returning a scalar
    init_x   : starting point (left unmodified)
    lr       : learning rate (step size)
    step_num : number of update steps
    Returns the final point as a float numpy array.

    BUG FIX: the original did `x = init_x` and then `x -= lr*grad`,
    which updates the caller's array in place; the same init_x was
    reused later in this script, so later runs started from an
    already-converged point. We now descend on a private copy.
    """
    x = np.array(init_x, dtype=float)      # copy; caller's array untouched
    for _ in range(step_num):
        grad = numerical_gradient(f, x)    # numeric partials at the current point
        x -= lr * grad                     # step against the gradient
    return x
# Minimize fn = x0^2 + x1^2 starting from (-3, 4); expect ~ (0, 0).
init_x=np.array([-3.,4.])
print(gradient_descent(fn,init_x,lr=0.1))

def fc(x):
    """Shifted bowl (x0+3)^2 + (x1-2)^2 with its minimum at (-3, 2)."""
    return (x[0]+3)**2+(x[1]-2)**2
# BUG FIX: the original repeated gradient_descent(fn, ...) here, so fc
# was defined but never used. Minimize fc from a fresh start point.
print(gradient_descent(fc, np.array([-3., 4.]), lr=0.1))


def gradient_descent_history(f, init_x, lr=0.01, step_num=100):
    """Gradient descent that also records every visited point.

    Returns (final_x, history) where history row i is the point *before*
    update i, so history has step_num rows.

    BUG FIX: the original did `x = init_x` and then updated x in place,
    clobbering the caller's array; we now work on a private copy.
    """
    x = np.array(init_x, dtype=float)   # copy: do not mutate the caller's array
    x_history = []
    for _ in range(step_num):
        x_history.append(x.copy())      # snapshot before this update
        x -= lr * numerical_gradient(f, x)
    return x, np.array(x_history)

# Trace 20 descent steps on fc and scatter-plot the visited points.
# NOTE(review): init_x was mutated in place by the earlier gradient_descent
# calls, so this trajectory starts near (0, 0) rather than (-3, 4) —
# likely unintended; confirm after fixing the mutation.
x,x_history = gradient_descent_history(fc,init_x,lr=0.1,step_num=20)
# Dashed reference axes through the origin.
plt.plot([-5,5],[0,0],'--r')
plt.plot([0,0],[-5,5],'--b')
plt.plot(x_history[:,0],x_history[:,1],'o')
plt.xlim(-3.5,3.5)
plt.ylim(-4.5,4.5)
plt.xlabel('x0')
plt.ylabel('x1')
plt.show()
#endregion


#region Gradient of a simple neural network
def cross_entropy_error(y, t):
    """Cross-entropy loss -sum(t * log(y)); a tiny epsilon guards log(0)."""
    eps = 1e-7
    return -np.sum(t * np.log(y + eps))

def softmax(a):
    """Numerically stable softmax: shift by max(a) before exponentiating
    so large scores cannot overflow exp()."""
    shifted = np.exp(a - np.max(a))
    return shifted / shifted.sum()

def numerical_gradient_2D(f, x):
    """Central-difference gradient of *f* w.r.t. a 2-D parameter array.

    x is perturbed in place (each entry restored after use), so f may
    read the array through a closure, as SimpleNet's loss does below.

    BUG FIX: the original built the gradient with
    np.zeros_like(x).reshape(2, 3), hard-coding a 2x3 shape;
    zeros_like already matches x, so the reshape only broke every other
    2-D shape. Float storage is forced so integer x cannot truncate.
    """
    h = 1e-4
    grad = np.zeros_like(x, dtype=float)   # same shape as x, float entries
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            tmp_value = x[i][j]
            x[i][j] = tmp_value + h
            fxh1 = f(x)
            x[i][j] = tmp_value - h
            fxh2 = f(x)
            grad[i][j] = (fxh1 - fxh2) / (2 * h)
            x[i][j] = tmp_value            # restore the entry
    return grad

class SimpleNet:
    """Minimal one-layer network: scores = x . W with a 2x3 weight matrix."""

    def __init__(self):
        # Gaussian-random weight initialization.
        self.W = np.random.randn(2, 3)

    def predict(self, x):
        """Return the raw class scores for input x."""
        return np.dot(x, self.W)

    def loss(self, x, t):
        """Cross-entropy of softmax(scores) against the one-hot target t."""
        scores = self.predict(x)
        probs = softmax(scores)
        return cross_entropy_error(probs, t)

# Demo: numerical gradient of the loss w.r.t. the weights of a SimpleNet.
net=SimpleNet()
print(net.W)
x=np.array([0.6,0.9])
p=net.predict(x)
print(p)

# Index of the largest score = the predicted class.
print(np.argmax(p))

# One-hot target: the correct class is index 1.
t=np.array([0,1,0])
print(net.loss(x,t))

# NOTE(review): this re-binds the module-level name `f` defined earlier.
# The W parameter is deliberately unused: numerical_gradient_2D perturbs
# net.W in place, and this closure reads net/x/t directly.
def f(W):
    return net.loss(x,t)

dW=numerical_gradient_2D(f,net.W)
print(dW)
#endregion





