import tensorflow as tf
from tensorflow.keras import datasets, models
from tensorflow.keras import layers, optimizers
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split

# Load the Boston housing dataset (506 samples, 13 features, target = median price).
# NOTE(review): sklearn.datasets.load_boston was deprecated in scikit-learn 1.0
# and removed in 1.2 — this script requires scikit-learn < 1.2. Consider migrating
# to tf.keras.datasets.boston_housing (or the CMU data-URL workaround) — TODO confirm.
boston = load_boston()
x, y = boston.data, boston.target
print('x shape:', x.shape, 'y shape:', y.shape)
print('Features:', boston.feature_names)
print('Target range: [{:.2f}, {:.2f}]'.format(y.min(), y.max()))

# Standardize every feature to zero mean / unit variance.
x = StandardScaler().fit_transform(x)

# Hold out 20% of the samples as a test set; fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
print('Training set:', x_train.shape, y_train.shape)
print('Test set:', x_test.shape, y_test.shape)

# Wrap the numpy arrays in tf.data pipelines for batched training/evaluation.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))

# 统一的预处理函数
def preprocess(x, y):
    """Cast a (features, target) pair to float32.

    Regression keeps the raw target values — no one-hot encoding needed.
    """
    return tf.cast(x, dtype=tf.float32), tf.cast(y, dtype=tf.float32)

# Assemble the input pipelines: cast, then shuffle (training only) and batch.
batch_size = 32
train_dataset = train_dataset.map(preprocess).shuffle(1000).batch(batch_size)
test_dataset = test_dataset.map(preprocess).batch(batch_size)

# Regression network: three ReLU hidden layers, each followed by 20% dropout,
# ending in a single linear unit that outputs the predicted price.
model = models.Sequential([
    layers.Dense(128, activation='relu', input_shape=(13,)),  # Boston housing has 13 features
    layers.Dropout(0.2),
    layers.Dense(64, activation='relu'),
    layers.Dropout(0.2),
    layers.Dense(32, activation='relu'),
    layers.Dropout(0.2),
    layers.Dense(1, activation='linear'),  # linear output: one real-valued prediction
])

model.summary()

# Compile the model — regression uses mean squared error (MSE) as the loss.
# BUG FIX: the tracked metric was 'mse' while the inline comment and every
# downstream consumer (the test_loss/test_mae unpack, history.history['mae']
# and 'val_mae' in the plots, the "Test MAE" prints) expect mean absolute
# error — the plots would raise KeyError and "Test MAE" printed MSE.
model.compile(optimizer='adam',
              loss='mse',       # mean squared error
              metrics=['mae'])  # mean absolute error

# Train the model; the held-out test set doubles as validation data so the
# training curves below can show generalization per epoch.
history = model.fit(train_dataset,
                    epochs=100,  # regression typically benefits from more epochs
                    validation_data=test_dataset,
                    verbose=1)

# Evaluate on the held-out test set; evaluate() returns [loss, tracked metric].
eval_results = model.evaluate(test_dataset)
test_loss, test_mae = eval_results
print(f'Test MSE: {test_loss:.4f}')
print(f'Test MAE: {test_mae:.4f}')

# Training curves: MAE on the left panel, MSE loss on the right.
plt.figure(figsize=(12, 4))

plt.subplot(1, 2, 1)
for key, label in (('mae', 'Training MAE'), ('val_mae', 'Validation MAE')):
    plt.plot(history.history[key], label=label)
plt.title('Model MAE')
plt.xlabel('Epoch')
plt.ylabel('MAE')
plt.legend()

plt.subplot(1, 2, 2)
for key, label in (('loss', 'Training Loss'), ('val_loss', 'Validation Loss')):
    plt.plot(history.history[key], label=label)
plt.title('Model Loss (MSE)')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.tight_layout()
plt.show()

# Persist the trained model (legacy HDF5 format).
model.save('boston_housing_model.h5')
print("Model saved as boston_housing_model.h5")

# Round-trip check: reload the saved model from disk.
loaded_model = models.load_model('boston_housing_model.h5')

# Pull one batch of test samples and run inference with the reloaded model.
test_features, test_targets = next(iter(test_dataset))
predictions = loaded_model.predict(test_features)

# Visualize prediction quality for the sampled batch.
plt.figure(figsize=(12, 8))

# Hoist the tensor->array conversions; reused by all three panels.
actual_prices = test_targets.numpy()
predicted_prices = predictions.flatten()

# Panel 1: actual vs predicted scatter, with the ideal y = x reference line.
plt.subplot(2, 2, 1)
plt.scatter(actual_prices, predicted_prices, alpha=0.7)
lo, hi = actual_prices.min(), actual_prices.max()
plt.plot([lo, hi], [lo, hi], 'r--', lw=2)
plt.xlabel('Actual Prices')
plt.ylabel('Predicted Prices')
plt.title('Actual vs Predicted Prices')

# Panel 2: histogram of signed prediction errors (predicted - actual).
plt.subplot(2, 2, 2)
errors = predicted_prices - actual_prices
plt.hist(errors, bins=20, alpha=0.7)
plt.xlabel('Prediction Error')
plt.ylabel('Frequency')
plt.title('Prediction Error Distribution')

# Panel 3: side-by-side bars for the first few samples.
plt.subplot(2, 1, 2)
sample_size = min(10, len(test_targets))
indices = np.arange(sample_size)
width = 0.35

plt.bar(indices - width/2, actual_prices[:sample_size], width, label='Actual', alpha=0.7)
plt.bar(indices + width/2, predicted_prices[:sample_size], width, label='Predicted', alpha=0.7)
plt.xlabel('Sample Index')
plt.ylabel('Price')
plt.title('Actual vs Predicted Prices (First 10 Samples)')
plt.legend()

plt.tight_layout()
plt.show()

# Print summary statistics of the model's performance.
print("\n=== 模型性能统计 ===")
print(f"测试集MSE: {test_loss:.4f}")
print(f"测试集MAE: {test_mae:.4f}")
print(f"平均预测误差: {np.mean(np.abs(errors)):.4f}")
print(f"预测误差标准差: {np.std(errors):.4f}")

# Per-sample comparison for the first five test samples.
print("\n=== 前5个样本预测对比 ===")
n_show = min(5, len(test_targets))
shown_actual = test_targets.numpy()[:n_show]
shown_pred = predictions.flatten()[:n_show]
for i, (actual, predicted) in enumerate(zip(shown_actual, shown_pred)):
    error = predicted - actual
    print(f"样本 {i+1}: 实际值 = {actual:.2f}, 预测值 = {predicted:.2f}, 误差 = {error:.2f}")