import numpy as np


def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + e^(-x)).

    Works on scalars and NumPy arrays alike (np.exp broadcasts).
    """
    return 1 / (1 + np.exp(-x))


# Backward-compatible alias: the rest of this file calls the function by
# its original (misspelled) name.
sigmod = sigmoid

# Training sample: two input features and their two target outputs.
data_in = np.array([0.05, 0.10])
data_out = np.array([0.01, 0.99])
print(data_in.shape)
print(data_in.ndim)

# weight1[i] holds the incoming weights of hidden unit h(i+1);
# weight2[i] holds the incoming weights of output unit o(i+1).
weight1 = np.array([[0.15, 0.20],
                    [0.25, 0.30]])
weight2 = np.array([[0.40, 0.45],
                    [0.50, 0.55]])

# Shared biases for the hidden layer and the output layer.
b1, b2 = 0.35, 0.60

print(weight1.shape)
print(weight1.ndim)

# ---- Forward pass ----
# Hidden layer: each unit's net input is its weight row dotted with the
# inputs plus the shared bias b1; activation is the sigmoid.
net_h1, net_h2 = weight1 @ data_in + b1
out_h1 = sigmod(net_h1)
out_h2 = sigmod(net_h2)
print(out_h1)
print(out_h2)

# Output layer: same pattern, fed by the hidden activations.
layer1 = np.array([out_h1, out_h2])
net_o1, net_o2 = weight2 @ layer1 + b2
out_o1 = sigmod(net_o1)
out_o2 = sigmod(net_o2)

print(out_o1)
print(out_o2)

# ---- Loss: halved squared error per output, summed ----
# The 1/2 factor makes the derivative come out as (out - target).
loss_o1, loss_o2 = np.square(data_out - np.array([out_o1, out_o2])) / 2
loss_total = loss_o1 + loss_o2

print(loss_total)

# ---- Backprop: gradients for w5 and w6 (output o1's incoming weights) ----
# dE1/d(out_o1): derivative of 0.5*(target - out)^2, i.e. (out - target).
derivative_loss_o1 = out_o1 - data_out[0]
# d(out_o1)/d(net_o1): sigmoid derivative expressed via its own output.
derivative_o1_n1 = out_o1 * (1 - out_o1)
# d(net_o1)/d(w5) is simply the upstream activation out_h1.
derivative_n1_w5 = out_h1

# Chain rule: dE/dw5 = dE/d(out) * d(out)/d(net) * d(net)/dw5.
derivative_loss_w5 = derivative_loss_o1 * derivative_o1_n1 * derivative_n1_w5
print(derivative_loss_w5)

# Same chain for w6; only the upstream activation differs.
derivative_n1_w6 = out_h2
derivative_loss_w6 = derivative_loss_o1 * derivative_o1_n1 * derivative_n1_w6

# Updated w6 with learning rate 0.5.
print(weight2[0][1] - 0.5*derivative_loss_w6)


# ---- Backprop: gradients for w7 and w8 (output o2's incoming weights) ----

# d(out_h1)/d(net_h1): sigmoid derivative for hidden unit h1
# (computed here, consumed later by the w1/w2 section).
derivative_oh1_nh1 = out_h1 * (1 - out_h1)

# Node delta for output o1. NOTE(review): not referenced anywhere in the
# visible file — kept for symmetry with derivative_output2.
derivative_output1 = derivative_loss_o1 * derivative_o1_n1

# dE2/d(out_o2) and d(out_o2)/d(net_o2).
derivative_loss_o2 = -(data_out[1] - out_o2)
derivative_o2_n2 = out_o2 * (1 - out_o2)

# Node delta for output o2 (fixed misspelling: was "deriavative_output2").
derivative_output2 = derivative_loss_o2 * derivative_o2_n2


# For each layer, every weight's gradient equals that layer's node delta
# times the previous layer's output value.
derivative_loss_w7 = derivative_output2 * out_h1
derivative_loss_w8 = derivative_output2 * out_h2

# Updated w7 and w8 with learning rate 0.5.
print(weight2[1][0] - 0.5 * derivative_loss_w7)
print(weight2[1][1] - 0.5 * derivative_loss_w8)

# ---- Backprop through the hidden layer: gradients for w1 and w2 (the
# incoming weights of hidden unit h1) ----

derivative_loss1_o1 = derivative_loss_o1
# dE1/d(net_o1) = dE1/d(out_o1) * d(out_o1)/d(net_o1)  (chain rule through
# the output activation).
derivative_loss1_no1 = derivative_loss1_o1 * derivative_o1_n1

# d(net_o1)/d(out_h1) is just the connecting weight (w5).
derivative_no1_oh1 = weight2[0][0]

# dE1/d(out_h1) = dE1/d(net_o1) * d(net_o1)/d(out_h1), i.e. the pre-activation
# gradient times the weight on the connection from h1.
derivative_loss1_oh1 = derivative_loss1_no1 * derivative_no1_oh1

# Same chain for the second output's loss, which also flows back into h1.
derivative_loss2_o2 = derivative_loss_o2
derivative_loss2_no2 = derivative_loss2_o2 * derivative_o2_n2
derivative_no2_oh1 = weight2[1][0]
derivative_loss2_oh1 = derivative_loss2_no2 * derivative_no2_oh1

# dE_total/d(out_h1) = sum of the per-output-loss gradients, over every
# output that h1 influences.
derivative_loss_oh1 = derivative_loss1_oh1 + derivative_loss2_oh1

# d(net_h1)/d(w1) is the input value that w1 carries.
derivative_nh1_w1 = data_in[0]

# Node delta for h1 = dE_total/d(out_h1) * d(out_h1)/d(net_h1).
derivative_layer1 = derivative_loss_oh1 * derivative_oh1_nh1

# Each weight's gradient = this unit's delta * the connected upstream value.
derivative_loss_w1 = derivative_layer1 * derivative_nh1_w1

print(derivative_loss_w1)

derivative_loss_w2 = derivative_layer1 * data_in[1]

# Updated w2 with learning rate 0.5.
print(weight1[0][1] - 0.5*derivative_loss_w2)