import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from scipy.optimize import minimize

data = loadmat('ex5data1.mat')
print(data.keys())
# dict_keys(['__header__', '__version__', '__globals__', 'X', 'y', 'Xtest', 'ytest', 'Xval', 'yval'])


# Training set: X = change in water level, y = water flowing out of the dam
X_train, y_train = data['X'], data['y']
print(X_train)
# [[-15.93675813]
#  [-29.15297922]
#  [ 36.18954863]
#  [ 37.49218733]
#  [-48.05882945]
#  [ -8.94145794]
#  [ 15.30779289]
#  [-34.70626581]
#  [  1.38915437]
#  [-44.38375985]
#  [  7.01350208]
#  [ 22.76274892]]
print(y_train)
# [[ 2.13431051]
#  [ 1.17325668]
#  [34.35910918]
#  [36.83795516]
#  [ 2.80896507]
#  [ 2.12107248]
#  [14.71026831]
#  [ 2.61418439]
#  [ 3.74017167]
#  [ 3.73169131]
#  [ 7.62765885]
#  [22.7524283 ]]


# Validation set
X_val, y_val = data['Xval'], data['yval']

# Test set
X_test, y_test = data['Xtest'], data['ytest']

# Prepend a bias column of ones to every design matrix
X_train = np.insert(X_train, 0, 1, axis=1)
X_val = np.insert(X_val, 0, 1, axis=1)
X_test = np.insert(X_test, 0, 1, axis=1)

def plot_data():
    """Scatter-plot the training set: water-level change vs. dam outflow.

    Reads the module-level ``X_train``/``y_train``. Column 0 of ``X_train``
    is the bias term, so only column 1 (the actual feature) is plotted.
    Does not call ``plt.show()`` so callers can overlay a fit first.
    """
    fig, ax = plt.subplots()
    # flatten y_train (shape (m, 1)) so x and y are both 1-D of the same size
    ax.scatter(X_train[:, 1], y_train.flatten())
    ax.set(xlabel='change in water level(x)',
           ylabel='water flowing out of the dam(y)')  # fixed typo: 'og' -> 'of'
    # plt.show()

plot_data()

# Regularized squared-error cost for linear regression.
def reg_cost(theta, X, y, lamda):
    """Return J(theta) = (||X@theta - y||^2 + lamda * ||theta[1:]||^2) / (2m).

    The bias parameter theta[0] is excluded from the penalty term.
    ``y`` may be a column vector; it is flattened before the subtraction.
    """
    m = len(X)
    residual = X @ theta - y.flatten()
    penalty = lamda * np.sum(np.square(theta[1:]))
    return (residual @ residual + penalty) / (2 * m)

# Sanity check: cost at theta = ones should match the course's expected value.
theta = np.ones(X_train.shape[1])
lamda = 1
print(reg_cost(theta, X_train, y_train, lamda))
# 303.9931922202643

# Gradient of the regularized cost with respect to theta.
def reg_gradient(theta, X, y, lamda):
    """Return dJ/dtheta as a 1-D array with the same length as theta.

    The intercept theta[0] is never regularized, so its penalty is zeroed.
    """
    m = len(X)
    errors = X @ theta - y.flatten()
    penalty = lamda * theta
    penalty[0] = 0  # no regularization on the bias term
    return (errors @ X + penalty) / m

# Sanity check: gradient at theta = ones (expected values shown below).
print(reg_gradient(theta, X_train, y_train, lamda))
# [-15.30301567 598.25074417]

# Fit the model: minimize the regularized cost starting from theta = ones.
def train_model(X, y, lamda):
    """Return the optimized theta for the given design matrix and targets.

    Uses scipy's TNC optimizer with the analytic gradient (reg_gradient).
    """
    initial_theta = np.ones(X.shape[1])
    result = minimize(fun=reg_cost,
                      x0=initial_theta,
                      args=(X, y, lamda),
                      method='TNC',
                      jac=reg_gradient)
    return result.x  # the optimized parameter vector

theta_final = train_model(X_train, y_train, lamda=0)


# Overlay the unregularized linear fit on the training-data scatter plot.
plot_data()
plt.plot(X_train[:, 1], X_train @ theta_final, c='r')
plt.show()


# Task: train on the first i examples for i = 1..m and compare how the
# training-set and validation-set errors evolve (learning curves).
def plot_learning_curve(X_train, y_train, X_val, y_val, lamda):
    """Plot training and cross-validation error vs. number of training examples.

    ``lamda`` is used only to *train* each model; the reported errors are the
    plain (unregularized) squared errors, per the standard learning-curve
    definition, so curves trained with different lamda remain comparable.
    (Previously the penalty term was included in the plotted errors; the two
    agree when lamda == 0, which is how this function is called here.)
    """
    # x: number of training examples used at each step
    x = range(1, len(X_train) + 1)
    training_cost = []    # training-set error per subset size
    cv_cost = []          # validation-set error per subset size

    for i in x:
        res = train_model(X_train[:i, :], y_train[:i, :], lamda)
        # evaluate with lamda = 0: learning-curve errors exclude the penalty
        training_cost.append(reg_cost(res, X_train[:i, :], y_train[:i, :], 0))
        cv_cost.append(reg_cost(res, X_val, y_val, 0))

    # Visualize both error curves
    plt.plot(x, training_cost, label='training cost')
    plt.plot(x, cv_cost, label='cv cost')
    plt.legend()
    plt.xlabel('number of training examples')
    plt.ylabel('error')
    plt.show()


plot_learning_curve(X_train, y_train, X_val, y_val, lamda=0)


# Task: build polynomial features for polynomial regression.
def poly_feature(X, power):
    """Append columns x^2 .. x^power derived from column 1 of X.

    Column 0 is assumed to be the bias column and is left alone.
    Returns a new array; each np.insert produces a fresh copy.
    """
    for exponent in range(2, power + 1):
        new_col = np.power(X[:, 1], exponent)
        X = np.insert(X, X.shape[1], new_col, axis=1)
    return X

# Column-wise mean and standard deviation (used for feature normalization).
def get_means_stds(X):
    """Return (means, stds), each computed along axis 0 of X."""
    return np.mean(X, axis=0), np.std(X, axis=0)

# Feature normalization (z-score); the bias column (index 0) is left untouched.
def feature_normalize(X, means, stds):
    """Return a normalized copy of X: (X[:, 1:] - means[1:]) / stds[1:].

    Operates on a float copy so the caller's array is not mutated in place
    (previously X_train_poly was silently overwritten by this call) and an
    integer input is not truncated by the in-place assignment.
    """
    X = X.astype(float)  # astype copies by default
    X[:, 1:] = (X[:, 1:] - means[1:]) / stds[1:]
    return X


power = 6  # highest polynomial degree for the extra features
X_train_poly = poly_feature(X_train, power)
X_val_poly = poly_feature(X_val, power)
X_test_poly = poly_feature(X_test, power)

# Normalization statistics come from the TRAINING set only
train_means, train_stds = get_means_stds(X_train_poly)

# Normalized data (validation/test reuse the training-set statistics)
X_train_norm = feature_normalize(X_train_poly, train_means, train_stds)
X_val_norm = feature_normalize(X_val_poly, train_means, train_stds)
X_test_norm = feature_normalize(X_test_poly, train_means, train_stds)


# Train on the normalized polynomial features to get the optimal theta
theta_fit = train_model(X_train_norm, y_train, lamda=0)

# Plot the data set together with the fitted polynomial curve.
def plot_poly_fit():
    """Overlay the polynomial fit (module-level ``theta_fit``) on the scatter."""
    plot_data()

    # Dense x grid mapped through the same feature pipeline as training data
    grid = np.linspace(-60, 60, 100)
    design = np.insert(grid.reshape(100, 1), 0, 1, axis=1)
    design = poly_feature(design, power)
    design = feature_normalize(design, train_means, train_stds)

    plt.plot(grid, design @ theta_fit, 'r--')
    plt.show()


plot_poly_fit()  # performance on the training set

# Learning curves for the polynomial model (lamda = 0)
plot_learning_curve(X_train_norm, y_train, X_val_norm, y_val, lamda=0)










