import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

"""
    防止过拟合：
    
        dropout 使用百分之 keep_prob用来计算模型，防止过拟合
"""


def dropout_test():
    """Fit y = x^2 with a small 1-100-1 network, using dropout as a regularizer.

    Builds a TF1 (graph-mode) model: one sigmoid hidden layer with dropout,
    trained with Adam and a slowly decaying learning rate.  Dropout is applied
    only during training; loss reporting and the final plot run with
    keep_prob = 1.0 so the full network is used deterministically.
    """
    # Training data: y = x^2 plus small uniform noise, shaped (500, 1).
    x_data = np.linspace(-1, 1, 500)[:, None]
    noise_data = np.random.uniform(-0.02, 0.02, x_data.shape)
    y_data = np.square(x_data) + noise_data

    # Graph inputs.
    x = tf.placeholder(tf.float32, [None, 1])
    y = tf.placeholder(tf.float32, [None, 1])
    learning_rate = tf.Variable(0.01)
    keep_prob = tf.placeholder(tf.float32)  # dropout keep probability

    # Hidden layer: 1 -> 100, sigmoid activation, then dropout.
    Weight_L1 = tf.Variable(tf.random_normal([1, 100]))
    Basic_L1 = tf.Variable(tf.zeros([1, 100]))
    Publish_L1 = tf.nn.sigmoid(tf.matmul(x, Weight_L1) + Basic_L1)
    Dropout_L1 = tf.nn.dropout(Publish_L1, keep_prob)

    # Output layer: 100 -> 1 (raw logits).
    Weight_L2 = tf.Variable(tf.random_normal([100, 1]))
    Basic_L2 = tf.Variable(tf.zeros([1, 1]))
    logits = tf.matmul(Dropout_L1, Weight_L2) + Basic_L2

    # Sigmoid cross-entropy loss.  NOTE(review): this treats the targets as
    # probabilities; y_data lies roughly in [-0.02, 1.02], so it mostly holds
    # here, but squared error is the conventional loss for regression —
    # confirm intent before reusing this pattern.
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits))

    # Prediction squashed into (0, 1) to match the cross-entropy formulation.
    prediction = tf.nn.sigmoid(logits)

    # Optimizer with a variable (decayable) learning rate.
    train = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    # Build the decay op ONCE.  Calling tf.assign(...) inside the training
    # loop would add a new node to the graph on every invocation, growing
    # the graph (and memory use) without bound.
    decay_lr = tf.assign(learning_rate, learning_rate * 0.99)

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        for step in range(1, 5000):
            # Dropout is active only while training.
            session.run(train, feed_dict={x: x_data, y: y_data, keep_prob: 0.93})
            if step % 1000 == 0:
                # Decay the learning rate by 1% every 1000 steps.
                session.run(decay_lr)
                # Report loss with dropout disabled (keep_prob=1.0) so the
                # value is deterministic and reflects the full network.
                print("step:", step, "learning_rate:", session.run(learning_rate), "loss:",
                      session.run(loss, feed_dict={x: x_data, y: y_data, keep_prob: 1.0}))

        plt.figure()
        plt.scatter(x_data, y_data)
        # Plot predictions with dropout disabled as well; `prediction` does
        # not depend on the label placeholder, so only x is fed.
        plt.plot(x_data, session.run(prediction, feed_dict={x: x_data, keep_prob: 1.0}), c="r")
        plt.show()


if __name__ == "__main__":
    dropout_test()
