#!/usr/bin/python
# -*- coding: utf-8 -*-

import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

"""
zip()函数： tutorial from runoob
>>> a = [1,2,3]
>>> b = [4,5,6]
>>> c = [4,5,6,7,8]
>>> zipped = zip(a,b)     # 返回一个对象
>>> zipped
<zip object at 0x103abc288>
>>> list(zipped)  # list() 转换为列表
[(1, 4), (2, 5), (3, 6)]
>>> list(zip(a,c))              # 元素个数与最短的列表一致
[(1, 4), (2, 5), (3, 6)]

>>> a1, a2 = zip(*zip(a,b))          # 与 zip 相反，zip(*) 可理解为解压，返回二维矩阵式
>>> list(a1)
[1, 2, 3]
>>> list(a2)
[4, 5, 6]
>>>
"""

# Training data: 100 evenly spaced float32 samples in the interval [-1, 1].
input_x = np.linspace(-1, 1, 100).astype(np.float32)
# Ground truth is y = 2x corrupted with Gaussian noise (std dev 0.3).
input_y = 2 * input_x + np.random.randn(*input_x.shape) * 0.3
# Trainable scalar parameters of the linear model, both initialised to 1.0.
weight = tf.Variable(1., dtype=tf.float32, name='weight')
bias = tf.Variable(1., dtype=tf.float32, name='bias')

def model(x):
    """Linear model prediction: weight * x + bias.

    Reads the module-level trainable variables ``weight`` and ``bias``.
    """
    return tf.multiply(x, weight) + bias

# Train with Adam (learning rate 0.1), one (x, y) sample per step.
opt = tf.optimizers.Adam(1e-1)
# Hoisted out of the loop: no need to rebuild the loss object every step.
loss_fn = tf.losses.MeanSquaredError()
step = 0
for x, y in zip(input_x, input_y):
    x = np.reshape(x, [1])
    y = np.reshape(y, [1])
    step += 1
    with tf.GradientTape() as tape:
        # Keras losses take (y_true, y_pred) in that order.
        loss = loss_fn(y, model(x))
    grads = tape.gradient(loss, [weight, bias])
    opt.apply_gradients(zip(grads, [weight, bias]))
    print("Step:", step, "Training Loss:", loss.numpy())

# Visualize the raw data and the fitted model ONCE, after training.
# (The original called the blocking plt.show() inside the loop, which
# halted training at every single iteration.)
plt.plot(input_x, input_y, 'ro', label='original data')
plt.plot(input_x, model(input_x), label='predicted value')
plt.plot(input_x, 2 * input_x, label='y=2x')
plt.legend()
plt.show()
print(weight)
print(bias)



