"""
Gradient descent for simple (univariate) linear regression.
"""
import numpy as np
import matplotlib.pyplot as plt


def dif_gradient(x_, y_, theta_):
    """
    Gradient of the least-squares cost at theta.

    :param x_: design matrix, shape (m, n)
    :param y_: target column vector, shape (m, 1)
    :param theta_: parameter column vector, shape (n, 1)
    :return: x^T (x @ theta - y) / m, shape (n, 1)
    """
    residual = np.dot(x_, theta_) - y_
    m_ = len(x_)
    return np.dot(x_.T, residual) / m_


def costFunctionJ(x_, y_, theta_):
    """
    Mean-squared-error cost J(theta) = sum((x @ theta - y)^2) / (2m).

    :param x_: design matrix, shape (m, n)
    :param y_: targets, shape (m, 1)
    :param theta_: parameters, shape (n, 1)
    :return: scalar cost value
    """
    m_ = np.size(x_, axis=0)
    residual = np.dot(x_, theta_) - y_
    # Element-wise square of the residuals, summed and halved-averaged.
    return np.sum(residual * residual) / (2 * m_)


def gradient(x_, y_, theta_, alpha_, echo_):
    """
    Batch gradient descent for the least-squares cost.

    :param x_: design matrix, shape (m, n)
    :param y_: targets, shape (m, 1)
    :param theta_: initial parameter vector, shape (n, 1)
    :param alpha_: learning rate
    :param echo_: number of iterations
    :return: (final theta, cost history of shape (echo_, 1) — one entry per iteration)
    """
    # Plain ndarray instead of the deprecated np.mat for the cost log.
    jlog = np.zeros((echo_, 1))
    for i in range(echo_):
        # One descent step: gradient at the current theta, then update.
        # (The original computed one redundant extra gradient before the loop;
        # the sequence of updates and logged costs is unchanged.)
        theta_ = theta_ - alpha_ * dif_gradient(x_, y_, theta_)
        jlog[i] = costFunctionJ(x_, y_, theta_)
    return theta_, jlog


# Raw string avoids the fragile '\R' escape sequence in the original literal.
file = r'.\Regression.csv'

# Load both columns in a single pass (the original read the file twice and
# opened an unused file handle with `with open(...)`).
data = np.loadtxt(file, dtype=float, delimiter=",", skiprows=1, usecols=(0, 1))
x = data[:, 0]
y = data[:, 1]

m = len(x)
# Column vector of x values, shape (m, 1).
x = np.array(x).reshape(m, 1)
# Prepend a bias column of ones to form the design matrix X, shape (m, 2).
X_1 = np.ones((m, 1))
X = np.hstack((X_1, x))
# Column vector of targets, shape (m, 1).
y = np.array(y).reshape(m, 1)

# Learning rates to compare
alpha1 = 0.01
alpha2 = 0.1
# Number of gradient-descent iterations
echo = 1500

# Initial guess for theta: (intercept, slope) as a (2, 1) column vector.
theta = np.array([1, 1]).reshape(2, 1)
Theta1, Jlog1 = gradient(X, y, theta, alpha1, echo)
print(Theta1)
Theta2, Jlog2 = gradient(X, y, theta, alpha2, echo)
print(Theta2)

# Scatter plot of the raw data points
plt.plot(x, y, 'o', label='data')
# Fitted regression lines for each learning rate
plt.plot(x, Theta1[1] * x + Theta1[0], 'b', label='0.01')
plt.plot(x, Theta2[1] * x + Theta2[0], 'r', label='0.1')
plt.legend()
plt.show()

# Cost-history curves: how J(theta) decreases over the iterations
plt.plot(Jlog1, label="0.01")
plt.plot(Jlog2, label="0.1")
plt.legend()
plt.show()

"""
Evaluate the two fitted models with MSE, MAE and RMSE.
"""


def mse(tar, pre):
    """
    Mean squared error: sum((tar - pre)^2) / len(tar).

    :param tar: target values
    :param pre: predicted values, same shape as tar
    :return: scalar MSE

    The original used np.linalg.norm(..., ord=2) on the 2-D (m, 1) residual,
    i.e. the matrix spectral norm — only coincidentally equal to the vector
    norm because the residual has a single column, and sqrt-then-square loses
    precision. Ravel and sum the squares directly instead.
    """
    residual = np.ravel(tar) - np.ravel(pre)
    return np.dot(residual, residual) / len(tar)


def mae(tar, pre):
    """
    Mean absolute error: sum(|tar - pre|) / len(tar).

    :param tar: target values
    :param pre: predicted values, same shape as tar
    :return: scalar MAE

    The original used np.linalg.norm(..., ord=1) on the 2-D (m, 1) residual,
    i.e. the max-column-sum matrix norm — only equal to the sum of absolute
    errors because the residual has a single column. Ravel first so the
    computation is the vector 1-norm regardless of input shape.
    """
    residual = np.ravel(tar) - np.ravel(pre)
    return np.sum(np.abs(residual)) / len(tar)


def rmse(tar, pre):
    """
    Root mean squared error: sqrt(sum((tar - pre)^2) / len(tar)).

    :param tar: target values
    :param pre: predicted values, same shape as tar
    :return: scalar RMSE

    Ravel the residual so the 2-norm is always the vector norm; the original
    applied ord=2 to a 2-D (m, 1) array (matrix spectral norm), which works
    only because the residual happens to have a single column.
    """
    residual = np.ravel(tar) - np.ravel(pre)
    return np.linalg.norm(residual) / np.sqrt(len(tar))


# Report the error metrics for each fitted model; the predictions are
# computed once per model instead of three times per metric line.
pred1 = Theta1[1] * x + Theta1[0]
print("alpha = 0.01:")
print("MSE:", '%.5f' % mse(y, pred1), ",MAE:", '%.5f' % mae(y, pred1),
      ",RMSE:", '%.5f' % rmse(y, pred1))

pred2 = Theta2[1] * x + Theta2[0]
print("alpha = 0.1:")
print("MSE:", '%.5f' % mse(y, pred2), ",MAE:", '%.5f' % mae(y, pred2),
      ",RMSE:", '%.5f' % rmse(y, pred2))
