import numpy as np

'''
    Optimizers: batch gradient descent and stochastic gradient descent.
'''

def gradient_descent(model, X, y, learning_rate, num_iterations):
    """Optimize a model's parameters with full-batch gradient descent.

    Parameters:
        model (object): model exposing ``predict(X)``, ``gradient(X, y, y_pred)``
            and an indexable ``params`` sequence.
        X (array-like): input data; the first dimension is the sample count.
        y (array-like): target values.
        learning_rate (float): step size for each parameter update.
        num_iterations (int): number of full-batch update steps.

    Returns:
        object: the same model instance, with updated parameters.
    """
    # Batch size: each gradient step is averaged over all samples.
    n_samples = X.shape[0]

    for _ in range(num_iterations):
        # Forward pass with the current parameters.
        predictions = model.predict(X)

        # Per-parameter gradients of the loss at the current parameters.
        grads = model.gradient(X, y, predictions)

        # Step each parameter against its batch-averaged gradient.
        for idx, value in enumerate(model.params):
            model.params[idx] = value - learning_rate * grads[idx] / n_samples

    return model

# Loss function: mean squared error.
def loss_function(y_pred, y_true):
    """Return the mean squared error between predictions and targets."""
    squared_errors = np.square(y_pred - y_true)
    return np.mean(squared_errors)

# The model: a one-dimensional linear regressor.
class Model:
    """Linear model ``y = w*x + b`` with randomly initialised parameters."""

    def __init__(self):
        # Random scalar starting points for slope and intercept.
        self.w = np.random.randn()
        self.b = np.random.randn()

    def predict(self, x):
        """Return the linear prediction ``w*x + b`` for input(s) ``x``."""
        return self.b + x * self.w

    def gradient(self, x, y_true):
        """Return ``(dL/dw, dL/db)`` of the MSE loss at the current parameters."""
        residual = self.predict(x) - y_true
        return 2 * np.mean(residual * x), 2 * np.mean(residual)

# Stochastic gradient descent trainer.
def SGD(model, x_train, y_train, learning_rate=0.01, epochs=100):
    """Optimize a linear model with per-sample gradient descent.

    Parameters are updated once per training sample, and the mean squared
    error over the full training set is printed after each epoch.

    Parameters:
        model (object): model exposing ``predict(x)``, ``gradient(x, y)``
            and scalar attributes ``w`` and ``b``.
        x_train (array-like): training inputs.
        y_train (array-like): training targets.
        learning_rate (float): step size for each update.
        epochs (int): number of passes over the training set.

    Returns:
        object: the same model instance, with updated parameters
        (returned for consistency with ``gradient_descent``).
    """
    n_samples = len(x_train)
    for epoch in range(epochs):
        # One parameter update per sample, in dataset order.
        # NOTE(review): samples are never shuffled, so this is deterministic
        # "online" gradient descent rather than true SGD — confirm intent.
        for i in range(n_samples):
            x = x_train[i]
            y_true = y_train[i]
            w_grad, b_grad = model.gradient(x, y_true)
            model.w -= learning_rate * w_grad
            model.b -= learning_rate * b_grad
        # Report the full-dataset loss once per epoch.
        y_pred = model.predict(x_train)
        loss = loss_function(y_pred, y_train)
        print(f"Epoch: {epoch+1} | Loss: {loss:.4f}")
    return model

# Synthetic training set: a noisy sample of the line y = 2x + 1.
x_train = np.linspace(0.0, 1.0, 100)
y_train = 1 + 2 * x_train + 0.2 * np.random.randn(100)

# Fit a fresh linear model to the data with stochastic gradient descent.
model = Model()
SGD(model, x_train, y_train)