
import  numpy as np


x = np.array([
    [111,112,1,3],
    [121,122,2,4],
    [123,112,3,5],
    [0.53,5.41,21,31],
    [3.51,2.43,22,32],
    [5.54,1.49,23,34],
    [1.51,5.43,22,33],
    [4.54,3.49,21,30],
    [-23,-311,501,210],
    [-22,-223,500,220],
    [-31,-111,503,215],
])

y = np.array([
   0,0,0,1,1,1,1,1,2,2,2
])
np.random.seed(1)

w0 = np.random.random((2,3))
w0 =  np.random.random((4,3))
b0 = np.random.random((1,3))
w1 = np.random.random((20,1))
b1 = np.random.random((9,3))


def softmax_loss(x, y):
    """Softmax cross-entropy loss and its gradient w.r.t. the scores.

    Parameters
    ----------
    x : ndarray of shape (N, C)
        Raw class scores for N samples over C classes.
    y : ndarray of shape (N,)
        Integer class labels in the range [0, C).

    Returns
    -------
    loss : float
        Mean cross-entropy loss over the N samples.
    dx : ndarray of shape (N, C)
        Per-sample gradient ``softmax(x) - one_hot(y)``. Note: NOT divided
        by N here — the caller (gradient descent loop) averages by the
        batch size itself.
    """
    # Subtract the row-wise max before exponentiating for numerical
    # stability; it cancels out in the normalized probabilities.
    probs = np.exp(x - np.max(x, axis=1, keepdims=True))
    probs /= np.sum(probs, axis=1, keepdims=True)
    N = x.shape[0]
    loss = -np.sum(np.log(probs[np.arange(N), y])) / N
    # d(loss_i)/d(x_i) = softmax(x_i) - one_hot(y_i)
    dx = probs.copy()
    dx[np.arange(N), y] -= 1
    return loss, dx

def batchGradientDescent(x, y, theta, alpha, m, maxIterations):
    """Train a linear softmax classifier with full-batch gradient descent.

    Parameters
    ----------
    x : ndarray of shape (m, n_features)
        Training inputs.
    y : ndarray of shape (m,)
        Integer class labels.
    theta : ndarray of shape (n_features, n_classes)
        Initial weight matrix; not mutated in place.
    alpha : float
        Learning rate.
    m : int
        Number of training samples, used to average the gradient.
    maxIterations : int
        Number of update steps to run.

    Returns
    -------
    ndarray
        The weight matrix after ``maxIterations`` updates.

    Side effects: prints the current loss once per iteration (debug output).
    """
    # Hoist the transpose out of the loop; x never changes.
    xT = x.transpose()
    for _ in range(maxIterations):
        # Scores are x @ theta; softmax_loss returns the mean loss and the
        # per-sample gradient w.r.t. the scores (no 1/N factor inside).
        loss, dscores = softmax_loss(np.dot(x, theta), y)
        print(loss)
        # Chain rule: d(loss)/d(theta) = x^T @ dscores, averaged over m.
        gradient = np.dot(xT, dscores) / m
        theta = theta - alpha * gradient
    return theta

# Train: 100 full-batch gradient-descent steps on the data above.
alpha = 0.03
m,n = x.shape
theta = batchGradientDescent(x,y,w0,alpha,m,100)
w0 = theta

# Four held-out samples (same 4-feature layout as `x`) to sanity-check
# the trained weights.
x1 = np.array([
    [111,112,1,4],
    [-23,-221,3,200],
    [5.53,3.49,2,30],
    [1.53,5.49,2,30],
])
out=np.matmul(x1,w0)  # raw class scores for the held-out rows

# Softmax over the scores (row-max subtracted for numerical stability),
# duplicating the normalization done inside softmax_loss.
probs = np.exp(out - np.max(out, axis=1, keepdims=True))
probs /= np.sum(probs, axis=1, keepdims=True)

print(np.max(probs,1))  # confidence of the winning class for each row
print("probs",probs)

#for i in range(100):
    #l1 = 1/(1+np.exp(-np.matmul(x,w0)+b0))
    #l1 = 1/(1+np.exp(-np.matmul(x,w0)))
    # get the final computed scores
    #out=softmax(np.matmul(x,w0)+b0)


