import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
import tensorflow as tf


# Command-line flags (TF1-style). Use the tf.compat.v1 path so this line
# also resolves under TensorFlow 2.x, where the bare `tf.app` module was
# removed — the rest of this file already uses tf.compat.v1 consistently.
tf.compat.v1.app.flags.DEFINE_integer("max_step", 1000, "训练模型的步数")
FLAGS = tf.compat.v1.app.flags.FLAGS


def linear_regression():
    """
    Fit a simple linear regression with the TensorFlow 1.x graph API.

    Builds a graph that samples random features x, derives targets from the
    known model y = 0.8 * x + 0.7, and trains a weight/bias pair with plain
    gradient descent, writing loss/weight/bias summaries for TensorBoard
    to ./tmp/summary/.

    NOTE(review): under TensorFlow 2.x this graph/Session style additionally
    requires tf.compat.v1.disable_eager_execution() before graph
    construction — confirm against the program entry point.

    :return: None
    """
    # 1. Create the feature data.
    with tf.compat.v1.variable_scope("original_data"):
        # x is a random *op*, not a fixed tensor: every sess.run() that
        # touches it draws a fresh batch of 100 samples.
        x = tf.random.normal(shape=(100, 1), mean=0.0, stddev=1.0, name="original_data_x")
        k = tf.constant([[0.8]], name="original_data_k")
        b = tf.constant([[0.7]], name="original_data_b")

    # 2. Create the target data: y = k * x + b
    with tf.compat.v1.variable_scope("target_data"):
        y_true = tf.matmul(x, k) + b

    # 3. Initialize the weight and bias parameters.
    # tf.Variable characteristics:
    #   - value persists across session runs
    #   - value can be modified
    #   - can be marked as trainable; if trainable=False (default True),
    #     the optimizer leaves weights/bias unchanged during training
    with tf.compat.v1.variable_scope("pre_param"):
        weights = tf.Variable(initial_value=tf.random.normal(shape=(1, 1), mean=0.0, stddev=1.0),
                              trainable=True, name="pre_param_weights")
        bias = tf.Variable(initial_value=tf.random.normal(shape=(1, 1), mean=0.0, stddev=1.0),
                           trainable=True, name="pre_param_bias")

    # 4. Make predictions.
    with tf.compat.v1.variable_scope("pre_result"):
        y_pre = tf.matmul(x, weights) + bias

    # 5. Compute the loss (mean squared error).
    with tf.compat.v1.variable_scope("compute_loss"):
        loss = tf.reduce_mean(tf.square(y_true - y_pre), name="loss")

    # 6. Optimize.
    with tf.compat.v1.variable_scope("optimizer_op"):
        optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)

    # 7. Variable initializer.
    init_op = tf.compat.v1.global_variables_initializer()

    # Collect summaries. Use the compat.v1 summary API to match the
    # graph/Session style above: the bare tf.summary.* namespace is the
    # TF2 eager API and has no FileWriter/merge_all in TensorFlow 2.x.
    tf.compat.v1.summary.scalar("loss", loss)  # 0-D tensor
    tf.compat.v1.summary.histogram("weights", weights)  # multi-dim tensors
    tf.compat.v1.summary.histogram("bias", bias)

    # Merge all collected summaries into a single op.
    merge = tf.compat.v1.summary.merge_all()

    # 8. Open a session and train.
    with tf.compat.v1.Session() as sess:
        # Initialize variables.
        sess.run(init_op)

        # Create the event file for TensorBoard.
        file_writer = tf.compat.v1.summary.FileWriter(logdir="./tmp/summary/", graph=sess.graph)
        try:
            for i in range(FLAGS.max_step):
                # Run the train step, the loss, and the merged summaries in
                # ONE sess.run so they all see the same random batch;
                # separate .eval() calls would each resample x, making the
                # printed loss unrelated to the step just taken.
                _, loss_val, summary = sess.run([optimizer, loss, merge])
                if i % 100 == 0:
                    print("loss:", loss_val)
                    print("weights:", sess.run(weights))
                    print("bias:", sess.run(bias))
                file_writer.add_summary(summary, i)
        finally:
            # Flush/close the event file even if training aborts.
            file_writer.close()
    return None


if __name__ == "__main__":
    # Script entry point: run the linear-regression training demo.
    linear_regression()
