import numpy as np
import matplotlib.pyplot as plt

# Configure matplotlib to render CJK labels (SimHei font) and keep the
# minus sign displaying correctly when a non-ASCII font is active
plt.rcParams['font.sans-serif'] = ['SimHei'] 
plt.rcParams['axes.unicode_minus'] = False

def stochastic_gradient_descent(X, y, learning_rate=0.01, epochs=100):
    """Fit a linear model y ≈ X·w + b using per-sample stochastic gradient descent.

    Args:
        X: (n_samples, n_features) feature matrix.
        y: (n_samples,) target vector.
        learning_rate: step size for each single-sample update.
        epochs: number of full passes over the data.

    Returns:
        (w, b, losses): learned weight vector, learned bias, and the
        per-epoch average squared-error history.
    """
    n_samples = len(X)

    # Random weight init; bias starts at zero
    w = np.random.randn(X.shape[1])
    b = 0
    losses = []

    for epoch in range(epochs):
        running_loss = 0

        # Visit the samples in a fresh random order every epoch
        for idx in np.random.permutation(n_samples):
            xi, yi = X[idx], y[idx]

            # Forward pass and squared-error loss for this one sample
            prediction = np.dot(xi, w) + b
            residual = prediction - yi
            sample_loss = residual ** 2

            # Gradient of the single-sample squared error
            grad_w = 2 * residual * xi
            grad_b = 2 * residual

            # Immediate parameter update (the "stochastic" step)
            w = w - learning_rate * grad_w
            b = b - learning_rate * grad_b

            running_loss += sample_loss

        avg_loss = running_loss / n_samples
        losses.append(avg_loss)

        if epoch % 20 == 0:
            print(f'Epoch {epoch}, Loss: {avg_loss:.4f}')

    return w, b, losses

# 1. Generate synthetic data: y = 4 + 3x + Gaussian noise
np.random.seed(42)  # fix the seed so runs are reproducible
X = 2 * np.random.rand(100, 1)  # 100 samples, features uniform in [0, 2]
true_w = 3  # ground-truth weight
true_b = 4   # ground-truth bias
y = true_b + true_w * X + np.random.randn(100, 1) * 0.5  # add noise (std 0.5)

# Flatten y to 1-D so per-sample predictions/losses are scalars
y = y.flatten()

print(f"数据形状: X={X.shape}, y={y.shape}")
print(f"真实参数: w={true_w}, b={true_b}")

# 2. Train with stochastic gradient descent
print("\n开始训练...")
w_trained, b_trained, loss_history = stochastic_gradient_descent(
    X, y, learning_rate=0.01, epochs=100
)

# Report learned vs. true parameters
print(f"\n训练结果:")
print(f"学习到的权重 w: {w_trained[0]:.4f}")
print(f"学习到的偏置 b: {b_trained:.4f}")
print(f"真实参数 -> w: {true_w}, b: {true_b}")

# 3. Visualize the results
plt.figure(figsize=(15, 5))

# Subplot 1: training data and the fitted regression line
plt.subplot(1, 3, 1)
plt.scatter(X, y, alpha=0.7, label='训练数据')
x_range = np.array([[0], [2]])  # two endpoints suffice for a straight line
y_pred = w_trained[0] * x_range + b_trained
plt.plot(x_range, y_pred, 'r-', linewidth=2, label=f'SGD拟合: y = {b_trained:.2f} + {w_trained[0]:.2f}x')
plt.xlabel('X')
plt.ylabel('y')
plt.title('随机梯度下降拟合结果')
plt.legend()
plt.grid(True, alpha=0.3)

# Subplot 2: per-epoch average training loss
plt.subplot(1, 3, 2)
plt.plot(loss_history)
plt.xlabel('迭代次数')
plt.ylabel('损失值')
plt.title('损失函数下降曲线')
plt.grid(True, alpha=0.3)

# Subplot 3: parameter convergence (needs a variant that records parameter history)
plt.subplot(1, 3, 3)
# For the demo, retrain once with a tracking-enabled SGD variant
def sgd_with_tracking(X, y, learning_rate=0.01, epochs=100):
    """Run the same per-sample SGD loop, snapshotting (w, b) for plotting.

    The history starts with the random initial parameters and then records
    a snapshot every 10 epochs.

    Returns:
        (w, b, w_history, b_history): final parameters plus their snapshots.
    """
    w = np.random.randn(X.shape[1])
    b = 0
    w_history = [w.copy()]  # copy: w is updated in place below
    b_history = [b]

    for epoch in range(epochs):
        for i in np.random.permutation(len(X)):
            # Single-sample residual drives both gradients
            residual = np.dot(X[i], w) + b - y[i]
            w -= learning_rate * (2 * residual * X[i])
            b -= learning_rate * (2 * residual)

        # Record the parameters every 10th epoch
        if epoch % 10 == 0:
            w_history.append(w.copy())
            b_history.append(b)

    return w, b, w_history, b_history

w_final, b_final, w_hist, b_hist = sgd_with_tracking(X, y)
# 11 x-positions: the initial snapshot plus one every 10 epochs (epochs=100)
epochs_plot = range(0, 101, 10)
# NOTE(review): snapshots are actually taken after epochs 1, 11, ..., 91 plus
# the initial state, so the x-axis positions are approximate — confirm if
# exact epoch labeling matters
plt.plot(epochs_plot, [true_w] * len(epochs_plot), 'g--', label='真实w', alpha=0.7)
plt.plot(epochs_plot, [true_b] * len(epochs_plot), 'b--', label='真实b', alpha=0.7)
plt.plot(epochs_plot, [w[0] for w in w_hist], 'g-', label='学习到的w')
plt.plot(epochs_plot, b_hist, 'b-', label='学习到的b')
plt.xlabel('迭代次数')
plt.ylabel('参数值')
plt.title('参数收敛过程')
plt.legend()
plt.grid(True, alpha=0.3)

plt.tight_layout()
plt.show()

# 4. Prediction demo with the trained parameters
print("\n预测示例:")
test_X = np.array([[0.5], [1.0], [1.5]])
print("输入X:", test_X.flatten())
predictions = w_trained[0] * test_X + b_trained
print("预测y:", [f'{p[0]:.2f}' for p in predictions])

# Final mean-squared error measured on the training set
final_predictions = w_trained[0] * X + b_trained
final_loss = np.mean((final_predictions - y) ** 2)
print(f"\n最终均方误差: {final_loss:.4f}")