import numpy as np
import matplotlib.pyplot as plt
from experiment2_2 import feature_normalize


def linear_regression_by_gradient_descent(x, y, alpha, num_iters, test_sample=(1650, 3)):
    """
    Fit linear regression (n features, 1 target) with batch gradient descent,
    plot the cost curve, and print a prediction for one raw sample.

    :param x: feature data, shape (m, n) — raw, un-normalized
    :param y: target values, shape (m,)
    :param alpha: learning rate controlling the gradient-descent step size
    :param num_iters: number of gradient-descent iterations
    :param test_sample: raw (un-normalized) feature row to predict after
        fitting; defaults to the original hard-coded (1650, 3) sample
    :return: learned parameter column vector theta (intercept term first)
    """
    # Normalize features so one learning rate works for all feature scales.
    x, mu, sigma = feature_normalize(x)
    # Prepend an all-ones intercept column; reshape y into a column vector (m, 1).
    x = np.column_stack((np.ones(x.shape[0]), x))
    y = y.reshape(-1, 1)
    # NOTE(review): np.mat/np.matrix is discouraged (np.mat was removed in
    # NumPy 2.0); kept here to preserve the original matrix-based pipeline
    # that compute_cost's `*` operator relies on.
    x_mat = np.mat(x)
    y_mat = np.mat(y)

    # Gradient descent from an all-zero starting point, (n+1, 1) column vector.
    theta = np.zeros(x_mat.shape[1]).reshape(-1, 1)
    theta, cost_history = gradient_descent(x_mat, y_mat, theta, alpha, num_iters)
    plot_cost(cost_history, num_iters)

    # Predict on the test sample: apply the SAME normalization statistics
    # (mu, sigma) as the training data, then add the intercept column.
    test = np.array(test_sample).reshape(1, -1)
    test = (test - mu) / sigma
    test = np.column_stack((np.ones(test.shape[0]), test))
    test_mat = np.mat(test)
    print(f"预测结果: {test_mat * theta}")
    return theta


def gradient_descent(x, y, theta, alpha, num_iters):
    """
    Run batch gradient descent and record the cost at every iteration.

    :param x: design matrix, shape (m, n+1), intercept column included
    :param y: target column vector, shape (m, 1)
    :param theta: initial parameters, column vector (n+1, 1); NOT mutated
    :param alpha: learning rate
    :param num_iters: number of iterations
    :return: (theta, cost_history) — learned parameters and a (num_iters, 1)
        array of the cost after each update
    """
    m = y.shape[0]
    # Work on a float copy so the caller's initial theta is never mutated
    # (the original in-place `theta -=` updated the caller's array).
    theta = np.array(theta, dtype=float)
    cost_history = np.zeros((num_iters, 1))
    for i in range(num_iters):
        predict = np.dot(x, theta)
        # Batch gradient of the MSE cost: X^T (X theta - y) / m
        gradient = np.dot(np.transpose(x), predict - y) / m
        theta -= alpha * gradient
        cost_history[i] = compute_cost(x, y, theta)
    return theta, cost_history


def compute_cost(x, y, theta):
    """
    Mean-squared-error cost J(theta) = ||X theta - y||^2 / (2m).

    Uses np.dot instead of the np.matrix `*` operator so it works for plain
    ndarrays as well as matrices (consistent with gradient_descent), and
    returns a plain Python float instead of a 1x1 matrix.

    :param x: design matrix, shape (m, n+1)
    :param y: target column vector, shape (m, 1)
    :param theta: parameter column vector, shape (n+1, 1)
    :return: scalar cost value
    """
    residual = np.dot(x, theta) - y
    # residual.T @ residual is a 1x1 array/matrix; .item() extracts the scalar.
    return (np.dot(np.transpose(residual), residual) / (2 * y.shape[0])).item()


def plot_cost(cost_history, num_iters):
    """Plot the cost value at each iteration to visually check convergence."""
    iterations = np.arange(1, num_iters + 1)
    plt.plot(iterations, cost_history)
    plt.show()


if __name__ == "__main__":
    # Load the comma-separated dataset: every column but the last holds a
    # feature, the last column holds the target value.
    data = np.loadtxt("data.txt", delimiter=",")
    features = np.array(data[:, 0:-1])
    targets = np.array(data[:, -1])

    linear_regression_by_gradient_descent(features, targets, alpha=0.1, num_iters=400)
