import numpy as np
import matplotlib.pyplot as plt

# Generate synthetic data for a 2-feature linear regression problem.
# NOTE: the seed plus the exact order/shape of the randn calls below fixes
# every downstream value — do not reorder these calls.
np.random.seed(42)
X = np.random.randn(100, 2)  # inputs: 100 samples, 2 features
true_weights = np.array([3.0, -2.0])
true_bias = 1.0
y = np.dot(X, true_weights) + true_bias + np.random.randn(100) * 0.1  # targets: linear signal + Gaussian noise (std 0.1)

# Initialize the parameters of a 2-4-1 feed-forward network (untrained).
input_size = 2  # number of input features
hidden_size = 4  # number of hidden units
output_size = 1  # number of outputs
W1 = np.random.randn(input_size, hidden_size)  # input-to-hidden weights
b1 = np.zeros(hidden_size)  # hidden-layer bias
W2 = np.random.randn(hidden_size, output_size)  # hidden-to-output weights
b2 = np.zeros(output_size)  # output-layer bias

# Activation functions
def relu(x):
    """Rectified linear unit: element-wise max(x, 0)."""
    return np.maximum(x, 0)
def linear(x):
    """Identity activation — returns its input unchanged (used at the output layer)."""
    return x

# Forward propagation
def forward_propagation(X, W1, b1, W2, b2):
    """Run one forward pass: Linear -> ReLU -> Linear (identity output).

    Args:
        X:  input batch, shape (n_samples, input_size).
        W1: input-to-hidden weights, shape (input_size, hidden_size).
        b1: hidden-layer bias, shape (hidden_size,).
        W2: hidden-to-output weights, shape (hidden_size, output_size).
        b2: output-layer bias, shape (output_size,).

    Returns:
        Network output, shape (n_samples, output_size).
    """
    z1 = X @ W1 + b1        # hidden-layer pre-activation
    a1 = np.maximum(z1, 0)  # ReLU (inlined)
    z2 = a1 @ W2 + b2       # output layer; identity activation, so return as-is
    return z2

# Evaluate the (untrained) network on the training inputs.
predictions = forward_propagation(X, W1, b1, W2, b2)

# Mean squared error between flattened predictions and the targets.
mse = np.mean((predictions.ravel() - y) ** 2)
print(f"Mean Squared Error: {mse}")

# Diagnostic plot: predicted vs. actual target values.
plt.scatter(y, predictions.flatten(), alpha=0.6)
# Reference diagonal — points on this line would be perfect predictions.
plt.plot([y.min(), y.max()], [y.min(), y.max()], 'r--')
plt.xlabel("True Values")
plt.ylabel("Predictions")
plt.title("True vs Predicted Values")
plt.show()