import numpy as np
import matplotlib.pyplot as plt

# Load the training data (the first row is a header and is skipped)
train = np.loadtxt(
    r'E:\Learning_Notes\Maths_about_ML\sourcecode-cn\click.csv',
    delimiter=',',
    skiprows=1,
)
train_x, train_y = train[:, 0], train[:, 1]

# Z-score standardization parameters, computed once from the training feature
mu = train_x.mean()
sigma = train_x.std()


def standardized(x):
    """Return x shifted and scaled to the training set's zero mean / unit std."""
    centered = x - mu
    return centered / sigma


train_z = standardized(train_x)  # standardized training feature

# Quadratic design matrix: bias, linear, and squared feature columns
def to_matrix(x):
    """Build the design matrix with columns [1, x, x**2] for quadratic regression."""
    bias = np.ones(x.shape[0])
    return np.column_stack((bias, x, x ** 2))

X = to_matrix(train_z)  # design matrix for the training data

# Prediction function (matrix-vector product)
def f(x_matrix, theta):
    """Return the model prediction x_matrix · theta for each row of x_matrix."""
    return x_matrix.dot(theta)

# Mean squared error (MSE)
def MSE(x_matrix, y, theta):
    """Return the average squared residual between y and the model predictions."""
    residual = y - f(x_matrix, theta)
    return (1 / x_matrix.shape[0]) * np.sum(residual ** 2)

# --------------- 1. Batch gradient descent (original version) ---------------
theta1 = np.random.rand(3)  # independent parameters so later sections do not overwrite them
ETA = 1e-3  # learning rate
error1 = MSE(X, train_y, theta1)
diff = 1
while diff > 1e-2:
    # Full-batch step: theta <- theta - eta * (X·theta - y)ᵀ X
    gradient = np.dot(f(X, theta1) - train_y, X)
    theta1 = theta1 - ETA * gradient
    current_error = MSE(X, train_y, theta1)
    # NOTE: signed difference — if the error ever increases, this loop also stops
    diff = error1 - current_error
    error1 = current_error

# --------------- 2. Gradient descent that records the MSE history ---------------
theta2 = np.random.rand(3)  # independent parameters
errors2 = [MSE(X, train_y, theta2)]
diff = 1
while diff > 1e-2:
    gradient = np.dot(f(X, theta2) - train_y, X)
    theta2 = theta2 - ETA * gradient
    errors2.append(MSE(X, train_y, theta2))
    # absolute value, so an occasional error increase does not end training early
    diff = abs(errors2[-2] - errors2[-1])

# --------------- 3. Stochastic gradient descent (SGD) ---------------
theta3 = np.random.rand(3)  # independent parameters
errors3 = [MSE(X, train_y, theta3)]
diff = 1
while diff > 1e-2:
    # Visit the samples in a fresh random order every epoch
    order = np.random.permutation(X.shape[0])
    for xi, yi in zip(X[order, :], train_y[order]):
        # Parameter update from a single sample
        theta3 = theta3 - ETA * (f(xi.reshape(1, -1), theta3) - yi) * xi
    errors3.append(MSE(X, train_y, theta3))
    diff = abs(errors3[-2] - errors3[-1])

# x-axis data for the plots
x_plot = np.linspace(-3, 3, 100)    # x values for drawing the fitted curves
x_error2 = np.arange(len(errors2))  # iteration index for method 2's error trace
x_error3 = np.arange(len(errors3))  # iteration index for method 3's error trace

# 2x2 grid of subplots, unpacked directly into named axes
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12, 8))

# 1. Fit produced by batch gradient descent
ax1.plot(train_z, train_y, 'o', color='blue', label='Training data')
ax1.plot(x_plot, f(to_matrix(x_plot), theta1), color='red', label='Fitted curve')
ax1.set_xlim(-3, 3)
ax1.set_ylim(0, 800)
ax1.set_title('Batch Gradient Descent')
ax1.legend()

# 2. MSE history of gradient descent
ax2.plot(x_error2, errors2, color='green')
ax2.set_title('MSE vs Iterations (GD)')
ax2.set_xlabel('Iterations')
ax2.set_ylabel('MSE')

# 3. Fit produced by stochastic gradient descent
ax3.plot(train_z, train_y, 'o', color='red', label='Training data')
ax3.plot(x_plot, f(to_matrix(x_plot), theta3), color='purple', label='Fitted curve (SGD)')
ax3.set_title('Stochastic Gradient Descent')
ax3.legend()

# 4. MSE history of stochastic gradient descent
ax4.plot(x_error3, errors3, color='orange')
ax4.set_title('MSE vs Iterations (SGD)')
ax4.set_xlabel('Iterations')
ax4.set_ylabel('MSE')

plt.tight_layout()  # avoid overlapping subplot labels
plt.show()