import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle
from sklearn.preprocessing import scale

# ===== Load data =====
df = pd.read_csv('boston.csv', header=0)
ds = df.values

# ===== Split features and labels =====
# x_data: the first 12 columns are the input features
x_data = ds[:, :12]
# y_data: column 12 is the regression target (house value)
y_data = ds[:, 12]

# ===== Split into train / validation / test sets =====
train_num = 300
valid_num = 100
test_num = len(x_data) - train_num - valid_num

x_train = x_data[:train_num]
y_train = y_data[:train_num]
x_valid = x_data[train_num:train_num + valid_num]
y_valid = y_data[train_num:train_num + valid_num]
x_test = x_data[train_num + valid_num:]
y_test = y_data[train_num + valid_num:]

# Standardize the features; without normalization the loss becomes NaN
# almost immediately because raw feature scales differ wildly.
# FIX: the original code called sklearn's scale() on each split separately,
# so train/valid/test were standardized with *different* statistics — an
# inconsistent transform that also leaks evaluation-set information.
# Fit mean/std on the TRAINING set only and apply them to every split.
# Cast to tf.float32 so the features can be fed to tf.matmul against W.
train_mean = x_train.mean(axis=0)
train_std = x_train.std(axis=0)
x_train = tf.cast((x_train - train_mean) / train_std, tf.float32)
x_valid = tf.cast((x_valid - train_mean) / train_std, tf.float32)
x_test = tf.cast((x_test - train_mean) / train_std, tf.float32)

# ===== 构建模型 =====
def model(x, w, b):
    """Linear model: y_hat = x @ w + b.

    Unlike the scalar case, w is a weight matrix here, so the product
    is a true matrix multiplication rather than elementwise scaling.
    """
    return tf.add(tf.matmul(x, w), b)


# ===== Trainable parameters =====
# W: (12, 1) weight matrix drawn from a standard normal distribution
# (mean 0, stddev 1, float32 — these are tf.random.normal's defaults).
W = tf.Variable(tf.random.normal(shape=[12, 1]))
# B: single bias term, initialized to zero.
B = tf.Variable(tf.zeros(1), dtype=tf.float32)

# ===== Training hyperparameters =====
train_epochs = 50      # number of full passes over the training set
learning_rate = 0.001  # SGD step size
batch_size = 10        # samples per mini-batch (mini-batch gradient descent)


# Loss function
def loss(x, y, w, b):
    """Mean-squared-error loss between model predictions and labels.

    FIX: model(x, w, b) returns a column of shape (N, 1) while y arrives
    as a flat vector of shape (N,). Subtracting them directly broadcasts
    to an (N, N) matrix, so reduce_mean silently averaged N*N wrong
    pairwise differences. Reshape (and cast) the labels to a matching
    (N, 1) float32 column before subtracting.
    """
    y_col = tf.reshape(tf.cast(y, tf.float32), [-1, 1])
    err = model(x, w, b) - y_col
    return tf.reduce_mean(tf.square(err))


# Gradient computation
def grad(x, y, w, b):
    """Return d(loss)/d[w, b] via automatic differentiation."""
    with tf.GradientTape() as tape:
        current_loss = loss(x, y, w, b)
    return tape.gradient(current_loss, [w, b])


# Optimizer — it applies the gradients for us, so [W, B] are never
# updated by hand in this example.
optimizer = tf.keras.optimizers.SGD(learning_rate)

# Training loop
loss_list_train = []  # per-epoch loss on the training set
loss_list_valid = []  # per-epoch loss on the validation set
total_step = train_num // batch_size
for epoch in range(train_epochs):
    # Mini-batch gradient descent: walk the training set in
    # contiguous, non-overlapping slices of batch_size rows.
    for start in range(0, total_step * batch_size, batch_size):
        xs = x_train[start:start + batch_size, :]
        ys = y_train[start:start + batch_size]
        grads = grad(xs, ys, W, B)               # compute gradients
        optimizer.apply_gradients(zip(grads, [W, B]))  # update W and B

    # Record full-set losses once per epoch for later plotting.
    loss_train = loss(x_train, y_train, W, B).numpy()
    loss_valid = loss(x_valid, y_valid, W, B).numpy()
    loss_list_train.append(loss_train)
    loss_list_valid.append(loss_valid)
    print('epoch=%3d, train_loss=%.4f, valid_loss=%.4f' % (epoch + 1, loss_train, loss_valid))

# Visualize the training and validation loss curves
plt.xlabel('Epochs')
plt.ylabel('Loss')
for curve, colour, tag in ((loss_list_train, 'blue', 'Train Loss'),
                           (loss_list_valid, 'red', 'Valid Loss')):
    plt.plot(curve, color=colour, label=tag)
plt.legend(loc=1)  # loc=1 places the legend in the upper-right corner
plt.show()

# Report the loss on the held-out test set.
# FIX: the original passed the loss value as a second print() argument,
# so the '%.4f' placeholder was printed literally instead of being
# filled in; use %-interpolation on the format string.
print('Test_loss: %.4f' % loss(x_test, y_test, W, B).numpy())

# ===== Apply the model ======
# Pick one house from the test set and compare the model's prediction
# with the ground-truth value.
# test_house_id = np.random.randint(0, test_num)
test_house_id = 82
y_actual = y_test[test_house_id]
prediction = model(x_test, W, B)[test_house_id]
# Squeeze the (1,)-shaped prediction down to a scalar for printing.
y_predit = tf.reshape(prediction, ()).numpy()
print('Actual value:', y_actual, 'Predicted value:', y_predit)
