import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

""" 矩阵的使用
"""


def tensor_function():
    """Demonstrate basic TensorFlow tensor-creation ops and print their values.

    tf.random_normal | tf.truncated_normal | tf.random_uniform | tf.zeros | tf.ones | tf.eye

        tf.random_normal:    normally-distributed random values with mean ``mean``
                             and standard deviation ``stddev``
        tf.truncated_normal: like random_normal, but values outside
                             [mean - 2*stddev, mean + 2*stddev] are re-drawn
        tf.random_uniform:   uniformly-distributed random values in [minval, maxval)
        tf.zeros:            a matrix of zeros with shape [rows, cols]
        tf.ones:             a matrix of ones with shape [rows, cols]
        tf.eye:              an identity matrix (num_columns sets the column count)

    Side effects: prints the evaluated tensors to stdout.
    """

    # Zero matrix with shape [rows, cols]: tf.zeros([rows, cols])
    var1 = tf.Variable(tf.zeros([1, 10]))

    # Random normal matrix with shape [rows, cols]: tf.random_normal([rows, cols])
    var2 = tf.Variable(tf.random_normal([1, 10]))

    # 2x2 matrix of ones
    var3 = tf.ones([2, 2])

    # Identity-like matrix with 2 rows and num_columns=3 columns
    var4 = tf.eye(2, num_columns=3)

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        # FIX: this print must live INSIDE the `with` block — the session is
        # closed as soon as the block exits, and running a closed session
        # raises RuntimeError.
        print(session.run([var1, var2, var3, var4, tf.multiply(var1, var2)]))


"""梯度下降
"""


def fun_line_train():
    """Fit the quadratic y = w * x^2 + b to generated data via gradient descent.

    Generates 100 random x samples, builds targets y = 1.4 * x^2 + 0.43,
    trains w and b for 1000 steps with a learning rate of 0.12, then overlays
    the fitted curve (green) on the raw data scatter and prints [w, b].
    """
    x_data = np.random.rand(100).astype(np.float32)
    y_data = np.square(x_data) * 1.4 + 0.43
    ax = show_layer(x_data, y_data)

    # Trainable parameters: w starts uniform in [-1, 1), b starts at zero.
    w = tf.Variable(tf.random_uniform([1], -1, 1))
    b = tf.Variable(tf.zeros([1]))
    y = w * tf.square(x_data) + b
    loss = tf.reduce_mean(tf.square(y - y_data))
    train = tf.train.GradientDescentOptimizer(0.12).minimize(loss)

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        for _ in range(1000):
            session.run(train)
        # Read the trained scalars once, then compute predictions in NumPy.
        fitted_w = session.run(w[0])
        fitted_b = session.run(b[0])
        prediction_value = fitted_w * np.square(x_data) + fitted_b
        ax.scatter(x_data, prediction_value, c="g", s=1)
        print([fitted_w, fitted_b])
        plt.ion()
        plt.show()


def show_layer(x_data, y_data):
    """Plot the raw (x, y) data as a yellow scatter and return the axes.

    Creates a new figure with a single subplot so callers can draw
    additional series on the returned axes.
    """
    figure = plt.figure()
    axes = figure.add_subplot(1, 1, 1)
    axes.scatter(x_data, y_data, c="y")
    return axes


# Guard the entry point so importing this module does not immediately
# run training and open a plot window.
if __name__ == "__main__":
    fun_line_train()