import tensorflow as tf

def sigmoid(x, deriv=False):
    """Sigmoid activation 1 / (1 + e^-x).

    When ``deriv`` is True, ``x`` is assumed to already be a sigmoid
    *output*, and the derivative expressed in terms of that output,
    ``x * (1 - x)``, is returned instead.
    """
    if not deriv:
        return 1 / (1 + tf.exp(-x))
    return x * (1 - x)

# Training inputs: 5 samples x 2 features (two small-valued rows and three
# large-valued rows, matching the two target groups in `y` below).
# BUG FIX: the original passed tf.float32 as tf.Variable's second
# *positional* argument, which is `trainable`, not `dtype` (it was merely
# truthy, so it acted as trainable=True). It is now passed explicitly as
# the dtype keyword, consistent with how `y` is declared.
x = tf.Variable(
    [[0.09, 0.15], [0.12, 0.19], [0.91, 0.91], [0.99, 0.96], [0.95, 0.96]],
    dtype=tf.float32,
    name="x-1",
)
# Weight matrices for a 2 -> 3 -> 1 network, drawn from a normal
# distribution with a small standard deviation.
w0 = tf.Variable(tf.random_normal([2, 3], dtype=tf.float32, stddev=0.35))
w1 = tf.Variable(tf.random_normal([3, 1], dtype=tf.float32, stddev=0.35))

# Target outputs, one scalar per input row.
y = tf.Variable(
    [
        [0.99],
        [0.98],
        [0.1],
        [0.12],
        [0.11],
    ],
    dtype=tf.float32,
)
init = tf.global_variables_initializer()

# Saver used to checkpoint the weights during training.
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    # Show the initial random weights before training.
    print(w0.eval())
    print(w1.eval())
    for i in range(300):
        # Hidden layer: input times w0, through the sigmoid activation.
        l1 = sigmoid(tf.matmul(x,w0))
        # Output layer: hidden activations times w1, through sigmoid.
        _y = sigmoid(tf.matmul(l1,w1))
        # Error between the prediction and the target values.
        l2_error = _y - y
        # Backpropagation: scale the error by the sigmoid derivative
        # (sigmoid(_y, True) expects _y to already be a sigmoid output).
        l2_delta = l2_error * sigmoid(_y,True)

        # Propagate the output delta back through w1 to the hidden layer.
        l0_error = tf.matmul(l2_delta,tf.transpose(w1))
        # Hidden-layer delta, again scaled by the sigmoid derivative.
        l0_delta = l0_error * sigmoid(l1,True)
        # Adjust the output-layer weights.
        # NOTE(review): `-=` on these names rebinds the Python variable to a
        # new subtraction Tensor rather than performing an in-place
        # assign_sub on the tf.Variable, so every iteration stacks more ops
        # onto the graph and nothing is evaluated until .eval() below —
        # confirm this accumulation is intended.
        w1 -= tf.matmul(tf.transpose(l1),l2_delta)
        # Adjust the first-layer weights the same way.
        w0 -= tf.matmul(tf.transpose(x),l0_delta)

        # value = sess.run(w0,feed_dict={x:[[0.1,0.2],[0.3,0.2],[0.89,0.91],[0.75,0.86]]})

        if(i%100==0):
            # Mean squared error between prediction and target.
            loss = tf.reduce_mean(tf.square(_y - y))

            print("loss:"+str(loss.eval()))
            # Checkpoint to a hard-coded Windows path.
            saver.save(sess,"d:/box/demo1")


    # Run one unseen sample through the trained network and print the
    # predicted output.
    x1 = [[0.81,0.91]]
    l1_test = sigmoid(tf.matmul(x1,w0))
    _y_test = sigmoid(tf.matmul(l1_test,w1))
    print(_y_test.eval())