#!/usr/bin/python3.9
# -*- coding: utf-8 -*-
# @Time    : 2021/10/13 15:31
# @Author  : YHSimon
"""
    在本练习中，您将实现正则化的线性回归和多项式回归，并使用它来研究具有不同偏差-方差属性的模型

    在前半部分的练习中，你将实现正则化线性回归，以预测水库中的水位变化，从而预测大坝流出的水量。
    在下半部分中，您将通过一些调试学习算法的诊断，并检查偏差 v.s. 方差的影响。
"""

import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
import scipy.optimize as opt


def plotData():
    """Scatter-plot the training set (module globals ``X``, ``y``):
    change in water level vs. water flowing out of the dam.

    Leaves the figure open; the caller decides when to ``plt.show()``.
    """
    _, ax = plt.subplots(figsize=(8, 5))
    ax.scatter(X[:, 1:], y, marker='x', c='r')
    ax.set_xlabel('Change in water level (x)')
    ax.set_ylabel('Water flowing out of the dam (y)')
    ax.grid(True)


def costReg(theta, X, y, l):
    """Regularized linear-regression cost.

    :param theta: parameter vector, 1-d array of shape (n+1,)
    :param X:     design matrix of shape (m, n+1), bias column first
    :param y:     targets of shape (m, 1)
    :param l:     regularization constant lambda
    :return:      scalar cost; theta[0] (the bias term) is NOT regularized
    """
    m = X.shape[0]
    residuals = X.dot(theta) - y.ravel()
    sq_err = residuals.dot(residuals)
    penalty = l * np.sum(theta[1:] ** 2)
    return (sq_err + penalty) / (2 * m)


def gradientReg(theta, X, y, l):
    """Gradient of the regularized linear-regression cost.

    :param theta: parameter vector, 1-d array of shape (n+1,)
    :param X:     design matrix of shape (m, n+1)
    :param y:     targets of shape (m, 1)
    :param l:     regularization constant lambda
    :return:      gradient with the same shape as theta; the bias term
                  (index 0) receives no regularization
    """
    m = len(X)
    errors = X @ theta - y.ravel()
    penalty = np.array(theta, dtype=float) * l
    penalty[0] = 0.0  # never regularize the bias term
    return (X.T @ errors + penalty) / m


def trainLinearReg(X, y, l):
    """Fit regularized linear regression by minimizing costReg with TNC.

    :param X: design matrix of shape (m, n+1), bias column included
    :param y: targets of shape (m, 1)
    :param l: regularization constant lambda
    :return:  optimized theta, 1-d array of shape (n+1,)
    """
    theta = np.zeros(X.shape[1])
    res = opt.minimize(fun=costReg, x0=theta, args=(X, y, l), method='TNC', jac=gradientReg)
    # FIX: the full OptimizeResult used to be printed unconditionally, which
    # floods stdout when this is called in a loop (e.g. the learning curve
    # trains 12 models). Only report when the optimizer fails.
    if not res.success:
        print('trainLinearReg: optimization did not converge:', res.message)
    return res.x


"""
    训练样本X从1开始逐渐增加，训练出不同的参数向量θ。接着通过交叉验证样本Xval计算验证误差。

    使用训练集的子集来训练模型，得到不同的theta。

    通过theta计算训练代价和交叉验证代价，切记此时不要使用正则化，将 λ = 0。

    计算交叉验证代价时记得整个交叉验证集来计算，无需分为子集。
"""


def plot_learning_curve(X, y, Xval, yval, l):
    """Plot the learning curve: training and cross-validation error as a
    function of the number of training examples.

    For each i in 1..m, fit theta on the first i training examples using
    regularization strength ``l``, then evaluate the *unregularized*
    (lambda = 0) cost on that training subset and on the whole validation set.

    :param X:    training design matrix of shape (m, n+1), bias column included
    :param y:    training targets of shape (m, 1)
    :param Xval: validation design matrix (always used in full, never subset)
    :param yval: validation targets
    :param l:    lambda used when TRAINING each model
    """
    sizes = range(1, len(X) + 1)  # at least one training example
    training_cost, cv_cost = [], []
    for i in sizes:
        # BUG FIX: lambda was hardcoded to 1 here, silently ignoring the
        # ``l`` parameter (the caller passes 0, as the notes above require).
        theta = trainLinearReg(X[:i], y[:i], l)
        # Errors are always measured without regularization (lambda = 0).
        training_cost.append(costReg(theta, X[:i], y[:i], 0))
        cv_cost.append(costReg(theta, Xval, yval, 0))
    plt.figure(figsize=(8, 5))
    plt.plot(sizes, training_cost, label='training cost')
    plt.plot(sizes, cv_cost, label='cv cost')
    plt.legend()
    plt.xlabel('Number of training examples')
    plt.ylabel('Error')
    plt.title('Learning curve for linear regression')
    plt.grid(True)


if __name__ == '__main__':
    # Load the ex5 dataset: training, cross-validation and test splits.
    path = 'ex5data1.mat'
    data = loadmat(path)
    X, y = data['X'], data['y']
    Xval, yval = data['Xval'], data['yval']
    Xtest, ytest = data['Xtest'], data['ytest']
    # Prepend a column of ones (the bias/intercept term) to every design matrix.
    X = np.insert(X, 0, 1, axis=1)
    Xval = np.insert(Xval, 0, 1, axis=1)
    Xtest = np.insert(Xtest, 0, 1, axis=1)
    print('X={},y={}'.format(X.shape, y.shape))
    print('Xval={},yval={}'.format(Xval.shape, yval.shape))
    print('Xtest={},ytest={}'.format(Xtest.shape, ytest.shape))
    """
        X=(12, 2),y=(12, 1)
        Xval=(21, 2),yval=(21, 1)
        Xtest=(21, 2),ytest=(21, 1)
    """
    # plotData()

    # 1.2 Regularized linear-regression cost function
    # Using theta initialized at [1, 1], and lambda = 1
    theta = np.ones(X.shape[1])
    print(costReg(theta, X, y, 1))  # 303.9931922202643

    # 1.3 Regularized linear-regression gradient
    print(gradientReg(theta, X, y, 1))  # gradient of [-15.303016; 598.250744] (with lambda=1)

    # 1.4 Fitting linear regression; plot the fitted line over the raw data.
    fit_theta = trainLinearReg(X, y, 0)
    plotData()
    plt.plot(X[:, 1], X @ fit_theta)
    print(X.shape, fit_theta.shape, (X @ fit_theta).shape)
    plt.show()

    # Here lambda = 0: the current linear model has only two parameters, and
    # at such low dimensionality regularization gives no benefit.
    # 2.1 Learning curves
    plot_learning_curve(X, y, Xval, yval, 0)
    plt.show()
