#!/usr/bin/python3.9
# -*- coding: utf-8 -*-
# @Time    : 2021/10/13 20:41
# @Author  : YHSimon

"""  多项式回归
数据预处理

1. X，Xval，Xtest都需要添加多项式特征，这里我们选择增加到6次方，
因为若选8次方无法达到作业pdf上的效果图，这是因为scipy和octave版本的优化算法不同。
2. 不要忘了标准化。
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
import scipy.optimize as opt


def genPolyFeatures(X, power):
    """Return a copy of X with polynomial columns x**2 .. x**power appended.

    Column 0 of X is the bias term and column 1 is the raw feature x;
    each new power is appended after the existing columns, in increasing
    order, so the raw feature column is never disturbed.
    """
    extra = [np.power(X[:, 1], p) for p in range(2, power + 1)]
    if not extra:
        # power < 2: nothing to add, still return a fresh copy.
        return X.copy()
    return np.hstack([X, np.column_stack(extra)])


def get_means_std(X):
    """Return per-column mean and sample standard deviation of X.

    Computed on the training set only, so the same statistics can be
    reused to normalize train, validation and test data consistently.
    ddof=1 gives the sample (not population) standard deviation.
    """
    return X.mean(axis=0), X.std(axis=0, ddof=1)


# Note: this is the SAMPLE standard deviation, not the population one. With
# np.std(), ddof=1 gives the sample std; the default ddof=0 is the population
# std. pandas computes the sample std by default.
def featureNormalize(myX, means, stds):
    """Z-score every column of myX except the bias column 0.

    `means` and `stds` are the training-set statistics (one entry per
    column); the bias entries means[0]/stds[0] are deliberately skipped.
    Returns a new array; the input is left untouched.
    """
    normalized = myX.copy()
    normalized[:, 1:] = (normalized[:, 1:] - means[1:]) / stds[1:]
    return normalized


def costReg(theta, X, y, l):
    """Regularized squared-error cost for linear regression.

    :param theta: 1-d array, shape (n+1,)
    :param X:     design matrix, shape (m, n+1)
    :param y:     targets, shape (m, 1)
    :param l:     regularization constant lambda
    theta[0] (the bias term) is excluded from the penalty.
    """
    m = len(X)
    residual = X @ theta - y.flatten()
    penalty = l * np.sum(theta[1:] ** 2)
    return (residual @ residual + penalty) / (2 * m)


def gradientReg(theta, X, y, l):
    """Gradient of the regularized cost with respect to theta.

    :param theta: 1-d array, shape (n+1,)
    :param X:     design matrix, shape (m, n+1)
    :param y:     targets, shape (m, 1)
    :param l:     regularization constant lambda
    :return:      1-d array with the same shape as theta
    The bias term theta[0] is not regularized.
    """
    m = len(X)
    err = X @ theta - y.flatten()
    # Zero in slot 0 keeps the bias out of the penalty term.
    reg = np.concatenate(([0.0], l * theta[1:]))
    return (err @ X + reg) / m


def trainLinearReg(X, y, l):
    """Fit regularized linear regression with the TNC optimizer.

    Starts from a zero theta, minimizes costReg using the analytic
    gradientReg, prints the full optimizer result for inspection and
    returns the optimal theta (1-d array, shape (n+1,)).
    """
    initial_theta = np.zeros(X.shape[1])
    result = opt.minimize(fun=costReg, x0=initial_theta, args=(X, y, l),
                          method='TNC', jac=gradientReg)
    print(result)
    return result.x


def plot_learning_curve(X, y, Xval, yval, l):
    """Plot the learning curve: training and cross-validation error as a
    function of how many training examples are used to fit theta."""
    sizes = range(1, len(X) + 1)  # need at least one training example
    train_err, cv_err = [], []
    for n in sizes:
        theta = trainLinearReg(X[:n], y[:n], l)
        # Errors are always evaluated with lambda = 0.
        train_err.append(costReg(theta, X[:n], y[:n], 0))
        cv_err.append(costReg(theta, Xval, yval, 0))
    plt.figure(figsize=(8, 5))
    plt.plot(sizes, train_err, label='training cost')
    plt.plot(sizes, cv_err, label='cv cost')
    plt.legend()
    plt.xlabel('Number of training examples')
    plt.ylabel('Error')
    plt.title('Learning curve for linear regression')
    plt.grid(True)


def plotData():
    """Scatter-plot the raw training data (module-level X and y).

    X is expected to carry a bias column at index 0, so only X[:, 1:]
    (the actual water-level feature) is plotted against y.
    """
    plt.figure(figsize=(8, 5))
    plt.scatter(X[:, 1:], y, c='r', marker='x')
    plt.xlabel('Change in water level (x)')
    plt.ylabel('Water flowing out of the dam (y)')
    plt.grid(True)


def plot_fit(means, stds, l):
    """Plot the polynomial fit trained on the normalized training set.

    Uses the module-level X_norm, y and power; `means`/`stds` are the
    training-set statistics so the plotted curve is normalized the same
    way as the data theta was fitted on.
    """
    theta = trainLinearReg(X_norm, y, l)
    xs = np.linspace(-75, 55, 50)
    design = np.insert(xs.reshape(-1, 1), 0, 1, axis=1)  # add bias column
    design = genPolyFeatures(design, power)
    design = featureNormalize(design, means, stds)

    plotData()
    plt.plot(xs, design @ theta, 'b--')


if __name__ == '__main__':
    # Load Andrew Ng's ex5 dataset: training / cross-validation / test splits.
    path = 'ex5data1.mat'
    data = loadmat(path)
    X, y = data['X'], data['y']
    Xval, yval = data['Xval'], data['yval']
    Xtest, ytest = data['Xtest'], data['ytest']
    # Prepend the bias column of ones to every design matrix.
    X = np.insert(X, 0, 1, axis=1)
    Xval = np.insert(Xval, 0, 1, axis=1)
    Xtest = np.insert(Xtest, 0, 1, axis=1)
    # power = 6  # expand features up to x**6
    power = 8  # expand features up to x**8
    # Normalize every split with the TRAINING set's mean/std only.
    train_means, train_stds = get_means_std(genPolyFeatures(X, power))
    X_norm = featureNormalize(genPolyFeatures(X, power), train_means, train_stds)
    Xval_norm = featureNormalize(genPolyFeatures(Xval, power), train_means, train_stds)
    Xtest_norm = featureNormalize(genPolyFeatures(Xtest, power), train_means, train_stds)

    # lambda = 0: training error is tiny -> overfitting
    # plot_fit(train_means, train_stds, 0)
    # plot_learning_curve(X_norm, y, Xval_norm, yval, 0)

    # lambda = 1: reasonable fit
    # plot_fit(train_means, train_stds, 1)
    # plot_learning_curve(X_norm, y, Xval_norm, yval, 1)

    # lambda = 2.16: too much penalty -> underfitting
    # (the original comment said lambda = 100, but the code below uses 2.16)
    # plot_fit(train_means, train_stds, 2.16)
    # plot_learning_curve(X_norm, y, Xval_norm, yval, 2.16)
    # plt.show()

    # 3.3 Select lambda via the cross-validation set
    lambdas = [0., 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1., 3., 10.]
    errors_train, errors_val = [], []
    for l in lambdas:
        theta = trainLinearReg(X_norm, y, l)
        errors_train.append(costReg(theta, X_norm, y, 0))  # remember lambda = 0 when measuring error
        errors_val.append(costReg(theta, Xval_norm, yval, 0))

    plt.figure(figsize=(8, 5))
    plt.plot(lambdas, errors_train, label="Train")
    plt.plot(lambdas, errors_val, label="Cross Validation")
    plt.legend()
    plt.xlabel('lambda λ')
    plt.ylabel('Error')
    plt.show()
    # The lambda with the smallest cross-validation cost is 3
    print(lambdas[np.argmin(errors_val)])  # argmin gives the index of the minimizing lambda

    # 3.4 Compute the error on the TEST set (with the chosen lambda = 3)
    # (the original comment said "training set error", but Xtest/ytest are used)
    # With power = 6 the test cost is 4.755271964962211
    # With power = 8 the test cost is 3.859901586426067
    theta = trainLinearReg(X_norm, y, 3)
    print('test cost(l={}) = {}'.format(3, costReg(theta, Xtest_norm, ytest, 0)))
