import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


def fun():
    """Fit y = x^2 with a 1-10-1 tanh network trained by MSE regression.

    Builds 200 noisy samples of y = x^2 over [-0.5, 0.5], trains a
    single-hidden-layer network with plain gradient descent, then
    scatter-plots the data (default color) and predictions (red).
    Uses the TensorFlow 1.x graph/session API.
    """
    # Training data: y = x^2 plus small uniform noise, shaped (200, 1).
    x_data = np.linspace(-0.5, 0.5, 200)[:, None]
    noise_data = np.random.uniform(-0.02, 0.02, x_data.shape)
    y_data = np.square(x_data) + noise_data

    # Placeholders: any batch size, 1 feature / 1 target.
    x = tf.placeholder(tf.float32, [None, 1])
    y = tf.placeholder(tf.float32, [None, 1])

    # Hidden layer: 1 -> 10 units with tanh activation.
    weight_l1 = tf.Variable(tf.random_normal([1, 10]))
    bias_l1 = tf.Variable(tf.zeros([1, 10]))
    hidden = tf.nn.tanh(tf.matmul(x, weight_l1) + bias_l1)

    # Output layer: 10 -> 1. tanh bounds predictions to (-1, 1), which
    # covers the target range (roughly [0, 0.27]) here.
    weight_l2 = tf.Variable(tf.random_normal([10, 1]))
    bias_l2 = tf.Variable(tf.zeros([1, 1]))
    prediction = tf.nn.tanh(tf.matmul(hidden, weight_l2) + bias_l2)

    # Mean-squared-error loss, minimized by vanilla gradient descent.
    loss = tf.reduce_mean(tf.square(y - prediction))
    train = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        # Bug fix: range(1, 40000) ran only 39,999 steps; run the full 40,000.
        for _ in range(40000):
            session.run(train, feed_dict={x: x_data, y: y_data})
        plt.figure()
        plt.scatter(x_data, y_data)
        # `prediction` does not depend on `y`, so only `x` needs feeding.
        plt.scatter(x_data, session.run(prediction, feed_dict={x: x_data}), c="r")
        plt.show()


def fun2():
    """Fit y = x^2 with a 1-100-1 sigmoid network trained by cross-entropy.

    Builds 500 noisy samples of y = x^2 over [-1, 1], trains with
    sigmoid cross-entropy treating the targets as soft labels, then
    scatter-plots the data (default color) and predictions (red).
    Uses the TensorFlow 1.x graph/session API.
    """
    # Training data: y = x^2 plus small uniform noise, shaped (500, 1).
    # NOTE(review): sigmoid cross-entropy expects labels in [0, 1];
    # the noise can push y_data slightly outside (up to ~1.02) — confirm
    # this is acceptable for the demo.
    x_data = np.linspace(-1, 1, 500)[:, None]
    noise_data = np.random.uniform(-0.02, 0.02, x_data.shape)
    y_data = np.square(x_data) + noise_data

    # Placeholders: any batch size, 1 feature / 1 target.
    x = tf.placeholder(tf.float32, [None, 1])
    y = tf.placeholder(tf.float32, [None, 1])

    # Hidden layer: 1 -> 100 units with sigmoid activation.
    weight_l1 = tf.Variable(tf.random_normal([1, 100]))
    bias_l1 = tf.Variable(tf.zeros([1, 100]))
    hidden = tf.nn.sigmoid(tf.matmul(x, weight_l1) + bias_l1)

    # Output layer: 100 -> 1, kept as raw logits for the loss below.
    weight_l2 = tf.Variable(tf.random_normal([100, 1]))
    bias_l2 = tf.Variable(tf.zeros([1, 1]))
    logits = tf.matmul(hidden, weight_l2) + bias_l2

    # Sigmoid cross-entropy loss on the logits (numerically stable form).
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits))

    # Predictions are the sigmoid of the logits, bounded to (0, 1).
    prediction = tf.nn.sigmoid(logits)

    # Vanilla gradient descent with learning rate 0.1.
    train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        # Bug fix: range(1, 10000) ran only 9,999 steps; run the full 10,000.
        for _ in range(10000):
            session.run(train, feed_dict={x: x_data, y: y_data})
        plt.figure()
        plt.scatter(x_data, y_data)
        # `prediction` does not depend on `y`, so only `x` needs feeding.
        plt.scatter(x_data, session.run(prediction, feed_dict={x: x_data}), c="r")
        plt.show()


if __name__ == "__main__":
    # Only the cross-entropy variant runs; fun() (MSE variant) is defined
    # but not invoked here.
    fun2()
