import torch

# Two-layer fully-connected network (sigmoid activations, squared-error loss)
# trained on random data with manually derived backpropagation — no autograd.

batch = 100          # number of samples per batch
hidden_layer = 3     # hidden-layer width
input_num = 12       # number of features per sample
output_num = 2       # output-layer width

# Randomly initialized parameters for layer 2 (hidden) and layer 3 (output).
w2 = torch.randn(input_num, hidden_layer)
b2 = torch.randn(hidden_layer)
w3 = torch.randn(hidden_layer, output_num)
b3 = torch.randn(output_num)

# Synthetic training data: random inputs and random regression targets.
x = torch.randn(batch, input_num)
y = torch.randn(batch, output_num)
learning_rate = 0.01

for _ in range(40):
    # Forward pass. The bias vectors broadcast over the batch dimension,
    # so no explicit expand() is needed. torch.sigmoid is the numerically
    # stable, idiomatic form of 1 / (1 + exp(-z)).
    z2 = x @ w2 + b2
    a2 = torch.sigmoid(z2)          # (batch, hidden_layer)
    z3 = a2 @ w3 + b3
    a3 = torch.sigmoid(z3)          # (batch, output_num)

    # Sum-of-squares loss over the whole batch.
    loss = (a3 - y).pow(2).sum()
    print(loss)

    # Backward pass (manual chain rule).
    grad_l = 2 * (a3 - y)                 # dL/da3
    grad_z3 = grad_l * a3 * (1 - a3)      # sigmoid'(z3) = a3 * (1 - a3)
    grad_b3 = grad_z3.sum(dim=0)          # reduce over the batch dimension
    grad_w3 = a2.T @ grad_z3
    grad_a2 = grad_z3 @ w3.T
    grad_z2 = grad_a2 * a2 * (1 - a2)     # sigmoid'(z2) = a2 * (1 - a2)
    grad_b2 = grad_z2.sum(dim=0)          # reduce over the batch dimension
    grad_w2 = x.T @ grad_z2

    # Plain SGD parameter update.
    w2 -= learning_rate * grad_w2
    w3 -= learning_rate * grad_w3
    b2 -= learning_rate * grad_b2
    b3 -= learning_rate * grad_b3
