import tensorflow as tf

def old_cases():
    """Demonstrate basic tensor math and automatic differentiation.

    Multiplies two constant matrices, then records a simple computation
    on a variable with GradientTape and prints the value together with
    its gradient d/dx (2x)^2 = 8x.
    """
    lhs = tf.constant([[1, 2], [3, 4]])
    rhs = tf.constant([[5, 6], [7, 8]])
    product = tf.matmul(lhs, rhs)
    print(product)

    var = tf.Variable(initial_value=3.)

    # Record operations on `var` so the tape can differentiate through them.
    with tf.GradientTape() as tape:
        doubled = 2 * var
        squared = tf.square(doubled)
    grad = tape.gradient(squared, var)
    print([squared, grad])

def old_case_2():
    """Compute a linear-model squared-error loss and its gradients.

    Builds L = 0.5 * sum((X @ w + b - y)^2) under a GradientTape and
    prints the loss along with the gradients w.r.t. w and b.
    """
    features = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
    targets = tf.constant([[1], [2]], dtype=tf.float32)
    weights = tf.Variable(initial_value=[[1], [2]], dtype=tf.float32)
    bias = tf.Variable(initial_value=1.)

    with tf.GradientTape() as tape:
        residual = tf.matmul(features, weights) + bias - targets
        loss = 0.5 * tf.reduce_sum(tf.square(residual))

    grads = tape.gradient(loss, [weights, bias])
    print([loss.numpy(), grads[0].numpy(), grads[1].numpy()])

import numpy as np

# Raw yearly data and its min-max normalization to [0, 1].
X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)

# np.ptp(a) == a.max() - a.min(), so these are the usual (v - min) / range scalings.
X = (X_raw - X_raw.min()) / np.ptp(X_raw)
y = (y_raw - y_raw.min()) / np.ptp(y_raw)

def numpy_linear(features=None, targets=None, num_epoch=10000, learning_rate=1e-3):
    """Fit targets ≈ a * features + b by hand-written gradient descent.

    The loss is L = 0.5 * sum((a*x + b - y)^2); its gradients are computed
    manually rather than by autodiff. Calling with no arguments reproduces
    the original behavior (fits the module-level normalized X and y).

    Args:
        features: 1-D array of inputs; defaults to the module-level X.
        targets: 1-D array of outputs; defaults to the module-level y.
        num_epoch: number of gradient-descent iterations.
        learning_rate: step size applied to both parameters.

    Returns:
        (a, b): the fitted slope and intercept.
    """
    if features is None:
        features = X
    if targets is None:
        targets = y

    a, b = 0.0, 0.0
    for _ in range(num_epoch):
        # Manually compute the gradients of the loss w.r.t. the parameters:
        # dL/da = sum((y_pred - y) * x), dL/db = sum(y_pred - y).
        y_pred = a * features + b
        err = y_pred - targets
        grad_a, grad_b = err.dot(features), err.sum()

        # Simultaneous update of both parameters.
        a, b = a - learning_rate * grad_a, b - learning_rate * grad_b

    print(a, b)
    return a, b

tf.keras.applications.ResNet50()