# Boston house-price prediction
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.datasets import boston_housing
from tensorflow.keras.callbacks import EarlyStopping
from matplotlib import pyplot as plt

# Load the Boston housing dataset and inspect its basic shape.
(train_data, train_labels), (test_data, test_labels) = boston_housing.load_data()
print(train_data.shape)
print(train_labels.shape)
print(test_data.shape)
print(test_labels.shape)
print(train_data[0])
print(train_labels[0])

# Highest and lowest house prices in the training labels
print(max(train_labels))
print(min(train_labels))

# Feature-wise standardization.
# axis=0 aggregates over rows, i.e. one statistic per column (attribute).
feature_mean = train_data.mean(axis=0)
feature_std = train_data.std(axis=0)
train_data = (train_data - feature_mean) / feature_std
# Each column now has mean 0 and standard deviation 1.
print(np.mean(train_data, axis=0))
print(np.std(train_data, axis=0))
# Standardize the test set with the *training* statistics (never its own),
# so no information from the test set leaks into preprocessing.
test_data = (test_data - feature_mean) / feature_std

def build_model():
    """Build and compile a small MLP regressor.

    Architecture: two hidden Dense layers of 64 ReLU units each, followed
    by a single linear output unit (scalar regression). Compiled with
    RMSprop on MSE loss, reporting MAE as a metric.

    Returns:
        A compiled ``keras.Sequential`` model, ready for ``fit``.
    """
    model = keras.Sequential()
    model.add(keras.layers.Dense(64, activation='relu'))
    model.add(keras.layers.Dense(64, activation='relu'))
    model.add(keras.layers.Dense(1))
    # MSE = mean of squared prediction errors (the training objective);
    # MAE = mean absolute error, reported because it is in the same unit
    # as the labels and easier to interpret.
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return model


# Hold out one fold of the training data for validation (k-fold-style
# slicing: `offset` selects which fold of `num_val_data` samples; here
# only fold 0 is used).
model = build_model()
num_val_data = 100
offset = 0
start_pos = num_val_data * offset
end_pos = num_val_data * (offset + 1)
val_data = train_data[start_pos:end_pos]
val_labels = train_labels[start_pos:end_pos]
# Training partition = everything outside the validation fold.
partial_train_data = np.concatenate((train_data[:start_pos], train_data[end_pos:]), axis=0)
partial_train_labels = np.concatenate((train_labels[:start_pos], train_labels[end_pos:]), axis=0)
print(partial_train_data.shape)
print(partial_train_labels.shape)
epochs = 200
# Early stopping: monitor val_loss; if it does not improve for 20
# consecutive epochs, stop training and restore the best weights seen.
early_stopping = EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True)
# validation_data is passed as a tuple (x_val, y_val) — the form the
# Keras Model.fit API documents (a list is not guaranteed to be accepted).
history = model.fit(x=partial_train_data, y=partial_train_labels, epochs=epochs, batch_size=16,
                    validation_data=(val_data, val_labels), verbose=1, callbacks=[early_stopping])

# History.history is a dict mapping metric names ('loss', 'mae',
# 'val_loss', 'val_mae') to per-epoch value lists.
records = history.history
assert isinstance(records, dict)
# Plotting code to observe overfitting during training (kept commented out
# as an exploration record).
# NOTE(review): with early stopping the history can be shorter than
# `epochs`, so the x-range below should be len(records['mae']) rather than
# `epochs` — confirm before re-enabling.
# x_start = 0
# x_end = epochs
# x = np.linspace(x_start, x_end, x_end - x_start)
# fig, ax = plt.subplots()
# ax.plot(x, records['mae'][x_start:x_end], label='mae', c='blue')
# ax.plot(x, records['val_mae'][x_start:x_end], label='val_mae', c='r')
# ax.set_title('mae and val_mae lines')
# ax.set_xlabel('epochs')
# ax.set_ylabel('mae')
# ax.legend()
# plt.show()
# model.evaluate(test_data, test_labels)
# Robust fitting appeared to end around epoch 130.
# Optionally retrain on the full training set for 130 epochs:
# new_model = build_model()
# new_model.fit(x=train_data, y=train_labels, epochs=130, batch_size=16, verbose=0)
# Final evaluation on the held-out test set (prints loss and MAE).
model.evaluate(test_data, test_labels)
