# Linear regression demo: fit y = 2x + 3 with batch gradient descent.
import numpy as np
import matplotlib.pyplot as plt

# Generate synthetic data
def generate_data(num_points=100, noise_level=0.5):
    """Create a noisy linear dataset y = 2x + 3 for regression demos.

    Args:
        num_points: number of samples to generate.
        noise_level: standard deviation of the Gaussian noise.

    Returns:
        (X, y) column vectors, each of shape (num_points, 1).
    """
    np.random.seed(42)  # fixed seed so every run is reproducible
    X = np.linspace(-5, 5, num_points).reshape(-1, 1)
    noise = np.random.normal(0, noise_level, size=(num_points, 1))
    y = 2 * X + 3 + noise
    return X, y

# Gradient descent implementation
def gradient_descent(X, y, learning_rate=0.01, epochs=1000):
    """Fit linear-regression parameters by full-batch gradient descent.

    Args:
        X: feature matrix of shape (n_samples, n_features); a bias column
            is prepended internally.
        y: targets of shape (n_samples, 1).
        learning_rate: step size for each parameter update.
        epochs: number of full-batch iterations.

    Returns:
        theta: learned parameters, shape (n_features + 1, 1); theta[0]
            is the intercept.
        loss_history: list of the MSE recorded at every epoch.
    """
    # Prepend a bias column (x0 = 1) so the intercept is learned jointly.
    X_b = np.c_[np.ones((X.shape[0], 1)), X]

    # Size the random init to the number of columns so any number of
    # features works (the original hard-coded exactly 2 parameters).
    theta = np.random.randn(X_b.shape[1], 1)

    # MSE recorded once per epoch.
    loss_history = []

    for epoch in range(epochs):
        y_pred = X_b.dot(theta)
        error = y_pred - y

        # Mean squared error for this epoch.
        loss = np.mean(error ** 2)
        loss_history.append(loss)

        # Gradient of the MSE (up to the conventional factor of 2,
        # which is absorbed into the learning rate).
        gradients = (1 / len(X)) * X_b.T.dot(error)

        # Parameter update.
        theta = theta - learning_rate * gradients

        # Print progress every 100 iterations.
        if epoch % 100 == 0:
            print(f"Epoch {epoch}/{epochs} - Loss: {loss:.4f}")

    return theta, loss_history

# Visualize results
def visualize_results(X, y, theta, loss_history):
    """Plot the fitted regression line beside the training-loss curve.

    Args:
        X: feature column vector used for training.
        y: target column vector.
        theta: learned parameters, theta[0] intercept and theta[1] slope.
        loss_history: per-epoch MSE values from gradient descent.
    """
    fig, (ax_fit, ax_loss) = plt.subplots(1, 2, figsize=(15, 5))

    # Left panel: raw data scatter with the learned line drawn on top.
    ax_fit.scatter(X, y, alpha=0.7, label='原始数据')
    ax_fit.plot(X, theta[0] + theta[1] * X, 'r-', linewidth=2, label='回归线')
    ax_fit.set_title(f'线性回归: y = {theta[0][0]:.2f} + {theta[1][0]:.2f}x')
    ax_fit.set_xlabel('X')
    ax_fit.set_ylabel('y')
    ax_fit.legend()
    ax_fit.grid(True)

    # Right panel: MSE per epoch, showing the descent converging.
    ax_loss.plot(loss_history, 'b-')
    ax_loss.set_title('损失函数下降')
    ax_loss.set_xlabel('迭代次数')
    ax_loss.set_ylabel('均方误差 (MSE)')
    ax_loss.grid(True)

    fig.tight_layout()
    plt.show()

# Main entry point
def main():
    """Run the demo end to end: data, fit, report, plot, predictions."""
    # Synthesize a noisy linear dataset.
    features, targets = generate_data(num_points=100, noise_level=1.5)

    # Fit the model with batch gradient descent.
    theta, loss_history = gradient_descent(
        features, targets, learning_rate=0.05, epochs=1000
    )

    # Report the learned intercept and slope.
    print(f"\n最终参数: theta0 = {theta[0][0]:.4f}, theta1 = {theta[1][0]:.4f}")

    # Show the fit and the loss curve side by side.
    visualize_results(features, targets, theta, loss_history)

    # Evaluate the fitted model on a few hand-picked points.
    test_X = np.array([[-3], [0], [4]])
    design = np.hstack([np.ones((3, 1)), test_X])
    predictions = design.dot(theta)

    print("\n预测结果:")
    for x, pred in zip(test_X, predictions):
        print(f"当 x = {x[0]:.1f} 时, y ≈ {pred[0]:.2f}")

# Resolved merge conflict: both sides of the conflict were identical copies
# of this script, so a single copy is kept and the duplicate removed.
if __name__ == "__main__":
    main()