# Built with Python 3.5.2
# Author: Chen Changhong (陈常鸿)
# A neural network with one hidden layer: numpy is used to form the arrays,
# everything else is implemented in plain Python statements.
import matplotlib.pyplot as plt
import numpy as np

def sigmoid(x):
    """Logistic activation: 1 / (1 + e^(-x)), applied element-wise."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)

def tanh(x):
    """Hyperbolic-tangent activation, applied element-wise.

    BUG FIX: the original body returned ``np.maximum(0, x)`` — that is
    ReLU, not tanh.  Return the actual hyperbolic tangent so the function
    matches its name and the comments describing the network.
    """
    return np.tanh(x)

# Training data and initial parameters: ten independent one-input units.
x = np.arange(1, 11)                                                  # inputs 1..10
y = np.asarray([4, 3, 5, 7, 9, 1, 3, 4, 5, 3])                        # targets
w1 = np.asarray([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.5, 0.4, 0.3, 0.2])  # hidden weights
b1 = np.asarray([1, 2, 3] * 3 + [1])                                  # hidden biases
w2 = np.asarray([0.2, 0.2, 0.3, 0.3, 0.5, 0.4, 0.3, 0.4, 0.3, 0.2])  # output weights
b2 = np.asarray([1, 2, 3] * 3 + [1])                                  # output biases
def train(x, w1, b1, w2, b2, y, lr=0.01):
    """Train the two-layer element-wise network for 100 epochs.

    Each of the 10 units is independent of the others:
        a1[i] = tanh(w1[i] * x[i] + b1[i])
        a2[i] = sigmoid(w2[i] * a1[i] + b2[i])
    Weights are updated in place by gradient descent on the squared
    error; biases are left fixed, matching the original design.

    Parameters:
        x, y   -- input samples and targets (length 10).
        w1, b1 -- hidden-layer weights and biases (length 10).
        w2, b2 -- output-layer weights and biases (length 10).
        lr     -- learning rate (new optional parameter; the original
                  commented it out, which made the step size 1).

    Returns:
        (w1, w2) -- the trained weight arrays.
    """
    for _ in range(100):
        # Forward pass.
        a1 = [tanh(w1[i] * x[i] + b1[i]) for i in range(10)]
        a2 = [sigmoid(w2[j] * a1[j] + b2[j]) for j in range(10)]
        # Backward pass: error = output - target.
        error = [a2[k] - y[k] for k in range(10)]
        # BUG FIX: the sigmoid derivative is a2*(1-a2); the original
        # wrote a2/(1-a2), which is not a derivative of anything here.
        delta = [error[p] * a2[p] * (1 - a2[p]) for p in range(10)]
        # Update weights by gradient DESCENT.
        # BUG FIX: the original ADDED the gradient (ascent, diverges)
        # and used x[i] in the w2 gradient where the chain rule gives
        # a1[i].  Gradients are computed before either weight is
        # touched so the two updates do not interfere.
        for i in range(10):
            grad_w2 = delta[i] * a1[i]
            # NOTE(review): as in the original, the derivative of the
            # hidden activation is omitted from the w1 gradient —
            # confirm whether it should be included.
            grad_w1 = delta[i] * w2[i] * x[i]
            w2[i] = w2[i] - lr * grad_w2
            w1[i] = w1[i] - lr * grad_w1
            # Biases are intentionally left fixed (original behaviour).
    return w1, w2
# Predict: run the trained network once over the inputs and plot the
# network output against the original targets.
w1, w2 = train(x, w1, b1, w2, b2, y)
output = []
a1 = []
for i in range(10):
    a1.append(tanh(w1[i] * x[i] + b1[i]))
    # BUG FIX: the output layer must apply sigmoid, exactly as in the
    # forward pass used during training; the original omitted it, so
    # the plotted "output" was not what the network was trained on.
    output.append(sigmoid(w2[i] * a1[i] + b2[i]))
plt.figure()
plt.plot(x, y, color='orange', label='origin')
plt.plot(x, output, color='blue', label='output')
plt.legend()
plt.show()