import matplotlib.pyplot as plt
import numpy as np

# Configure matplotlib so the Chinese axis labels/titles below render correctly
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei font includes CJK glyphs
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with this font

# Toy dataset: 4 samples, 2 features; targets follow y = 1*x1 + 2*x2 + 3 exactly
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
# Keep y as an (m, 1) column vector so it aligns with the (m, 1)
# predictions X_b.dot(theta). A flat (m,) y would broadcast against
# the (m, 1) predictions into an (m, m) matrix, silently corrupting
# both the cost and the gradients.
y = (np.dot(X, np.array([1, 2])) + 3).reshape(-1, 1)

# Random initial parameters: 2 feature weights plus the bias term
theta = np.random.randn(3, 1)
iterations = 1000  # number of gradient-descent steps
alpha = 0.01       # learning rate


# 损失函数
def compute_cost(X, y, theta):
    """Return the least-squares cost J(theta) for data ``(X, y)``.

    Parameters
    ----------
    X : ndarray of shape (m, n)
        Design matrix (including any bias column).
    y : ndarray of shape (m,) or (m, 1)
        Target values.
    theta : ndarray of shape (n, 1)
        Parameter column vector.

    Returns
    -------
    float
        J(theta) = 1/(2m) * sum((X @ theta - y)^2).
    """
    m = len(y)
    predictions = X.dot(theta)
    # Force y into a column vector: a flat (m,) y would broadcast
    # against the (m, 1) predictions into an (m, m) error matrix.
    errors = predictions - np.asarray(y).reshape(-1, 1)
    # NOTE: the original wrote (1 / 2 * m), which evaluates to m/2 by
    # operator precedence — the cost was scaled by m^2. Correct factor
    # is 1 / (2 * m).
    return float(np.sum(np.square(errors)) / (2 * m))


# 梯度下降
def gradient_descent(X, y, theta, alpha, iterations):
    """Run batch gradient descent on the least-squares cost.

    Parameters
    ----------
    X : ndarray of shape (m, n)
        Design matrix (including any bias column).
    y : ndarray of shape (m,) or (m, 1)
        Target values.
    theta : ndarray of shape (n, 1)
        Initial parameter column vector (not mutated).
    alpha : float
        Learning rate.
    iterations : int
        Number of update steps to perform.

    Returns
    -------
    tuple[ndarray, ndarray]
        The final (n, 1) parameters and the (iterations,) array of the
        cost recorded after each update.
    """
    m = len(y)
    # Force y into a column vector: a flat (m,) y would broadcast
    # X.dot(theta) - y into an (m, m) matrix, turning theta into an
    # (n, m) matrix and silently destroying the fit.
    y = np.asarray(y).reshape(-1, 1)
    cost_history = np.zeros(iterations)

    for i in range(iterations):
        # Gradient of the 1/(2m) squared-error cost: X^T (X theta - y) / m
        gradients = X.T.dot(X.dot(theta) - y) / m
        theta = theta - alpha * gradients
        cost_history[i] = compute_cost(X, y, theta)

    return theta, cost_history


# Prepend a column of ones so theta[0] acts as the bias/intercept term
X_b = np.hstack([np.ones((len(X), 1)), X])

# Fit the parameters with batch gradient descent
theta, cost_history = gradient_descent(X_b, y, theta, alpha, iterations)

# Visualize how the cost evolved across iterations
plt.plot(np.arange(1, iterations + 1), cost_history, 'b-')
plt.xlabel('迭代次数')
plt.ylabel('损失值')
plt.title('梯度下降优化损失值')
plt.show()

print(f"优化后的参数: {theta.ravel()}")