# %%
from sklearn.linear_model import Lasso, LassoLarsIC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, validation_curve, learning_curve, ShuffleSplit
from sklearn.metrics import mean_squared_error
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# %% [markdown]
# ## 载入数据集
# 
# 以 8:2 随机划分训练集和测试集，以42为随机种子（使得结果可以复现）

# %%
# Column names for the California housing dataset (the raw file has no header row).
column_names = [
    'longitude', 'latitude', 'housingMedianAge',
    'totalRooms', 'totalBedrooms', 'population',
    'households', 'medianIncome', 'medianHouseValue',
]
df = pd.read_csv('cal_housing.data', header=None, names=column_names)
# Quick visual sanity check of the parsed rows.
df.head()

# %%
# Split into feature matrix (first 8 columns) and target (last column, medianHouseValue).
values = df.values
X, y = values[:, :8], values[:, 8]

# %%
# Standardize every feature to zero mean / unit variance; the L1 penalty in
# Lasso is scale-sensitive, so unscaled features would be penalized unevenly.
std = StandardScaler().fit(X)
X = std.transform(X)

# %%
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, shuffle=True)

# %% [markdown]
# ## Lasso 回归模型定义
# 
# Lasso 回归的目标函数是在均方误差（ $MSE$ ）的基础上添加 $L1$ 正则化项：
# 
# $$
# \min_\beta (\cfrac{1}{2n} \sum_{i=1}^n (y_i - \mathbf{x}_i^T \beta)^2 + \lambda \sum_{j=1}^p |\beta_j |)
# $$

# %% [markdown]
# ## 自定义 $L1$ 范数系数
# 
# `scikit-learn` 中的 Lasso 以  
# `(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1`  
# 为最小化目标，其中 `alpha` 即为数学上 $L1$ 范数系数 $\lambda$

# %%
# Train a Lasso regressor; `alpha` is sklearn's name for the λ coefficient of
# the L1 term (1.0 is also the library default, kept explicit here).
model = Lasso(alpha=1.0)
model.fit(X_train, y_train)

# %% [markdown]
# ## 模型评估

# %%
# Evaluate on the held-out test set; compute predictions once up front.
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)

print(f'系数\t{model.coef_}')
print(f'截距\t{model.intercept_}')
print(f'R2评估\t{model.score(X_test, y_test)}')
print(f'MSE\t{mse}')

# %%
# Scatter the predictions over the first two (standardized) features and
# overlay the model's plane restricted to those same two features.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X_test[:, 0], X_test[:, 1], y_pred, alpha=0.3)
ax.view_init(30, 50)

# 10x10 mesh spanning the observed range of features 0 and 1.
grid_a, grid_b = np.meshgrid(
    np.linspace(X_test[:, 0].min(), X_test[:, 0].max(), 10),
    np.linspace(X_test[:, 1].min(), X_test[:, 1].max(), 10),
)

# Plane implied by the intercept and the first two coefficients only
# (all other features implicitly held at zero, their standardized mean).
plane = model.intercept_ + model.coef_[0] * grid_a + model.coef_[1] * grid_b
ax.plot_surface(grid_a, grid_b, plane, cmap='viridis', alpha=0.2)

plt.show()

# %% [markdown]
# ## 自动选择 `alpha` 取值
# 
# `sklearn` 内置使用 AIC（赤池信息准则）或 BIC（贝叶斯信息准则）等统计指标，选择准则值最小的 `alpha` 的算法

# %%
# Let LassoLarsIC choose alpha automatically by minimizing BIC along the LARS path.
model_ic = LassoLarsIC(criterion='bic')
model_ic.fit(X_train, y_train)

y_pred = model_ic.predict(X_test)
mse = mean_squared_error(y_test, y_pred)

print(f'系数\t{model_ic.coef_}')
print(f'截距\t{model_ic.intercept_}')
print(f'R2评估\t{model_ic.score(X_test, y_test)}')
print(f'MSE\t{mse}')

# %% [markdown]
# R2反而变小了，MSE反而变大了

# %%
model_ic.alpha_

# %% [markdown]
# 事实上，当alpha=0，这就是线性回归

# %% [markdown]
# ## 网格搜索分析最优惩罚项系数

# %%
# Grid of candidate alphas, log-spaced over [1e-2, 1e3].
alphas = np.logspace(-2, 3, 20)

# 5-fold cross-validated R^2 for every alpha in the grid.
train_scores, valid_scores = validation_curve(
    Lasso(), X, y, param_name="alpha", param_range=alphas, cv=5, scoring='r2'
)

# Single shared axes for both mean curves. The original cell called
# plt.ylabel/plt.legend twice and carried commented-out subplot code;
# the duplicates were no-ops (later calls overwrite earlier ones), so
# they are collapsed here without changing the final figure.
plt.title('Validation Curve for Lasso(r2)')
plt.semilogx(alphas, np.mean(train_scores, axis=1), label='Training score')
plt.semilogx(alphas, np.mean(valid_scores, axis=1), label='Validation score')
plt.xlabel('Alpha (Regularization strength)')
plt.ylabel('Score')
plt.legend()
plt.show()

# %%
# Same alpha sweep, scored with negative MSE (higher is better, 0 is perfect).
alphas = np.logspace(-2, 3, 20)

train_scores, valid_scores = validation_curve(
    Lasso(), X, y, param_name="alpha", param_range=alphas, cv=5, scoring='neg_mean_squared_error'
)

# The original labelled the single shared y-axis 'Score(train)' and then
# 'Score(test)' — the second call silently overwrote the first, leaving a
# misleading label for a plot showing both curves. One accurate label instead;
# duplicate legend call and dead commented-out subplot code removed.
plt.title('Validation Curve for Lasso(-MSE)')
plt.semilogx(alphas, np.mean(train_scores, axis=1), label='Training score')
plt.semilogx(alphas, np.mean(valid_scores, axis=1), label='Validation score')
plt.xlabel('Alpha (Regularization strength)')
plt.ylabel('Score (negative MSE)')
plt.legend()
plt.show()

# %% [markdown]
# ## 结果分析
# 
# 训练集的评价比验证集高出很多，这表明模型过拟合了，泛化能力差。  
# 但困境在于，这个模型总共就只有一个超参数，该怎么办呢。

# %% [markdown]
# ## 学习过程曲线分析

# %%
# Learning curve for Lasso(alpha=0.1): negative MSE over growing training
# fractions, averaged across 10 shuffled 80/20 splits (seed 0).
model = Lasso(alpha=0.1)
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
train_sizes, train_scores, valid_scores = learning_curve(
    model, X, y,
    train_sizes=np.linspace(0.1, 1.0, 10),  # 10% .. 100% of the training data
    cv=cv,
    scoring='neg_mean_squared_error',
    n_jobs=-1
)

# Negate the mean scores back to MSE; std is unaffected by the sign flip.
train_mean, train_std = -np.mean(train_scores, axis=1), np.std(train_scores, axis=1)
valid_mean, valid_std = -np.mean(valid_scores, axis=1), np.std(valid_scores, axis=1)

plt.figure(figsize=(10, 6))
plt.title('Lasso Learn route')
plt.xlabel('Train sample size')
plt.ylabel('MSE')

# Draw each curve with a shaded +/- 1 std band.
for mean, std, color, marker, label in (
    (train_mean, train_std, 'blue', 'o', 'Train MSE'),
    (valid_mean, valid_std, 'green', 's', 'Valid MSE'),
):
    plt.plot(train_sizes, mean, color=color, marker=marker, markersize=5, label=label)
    plt.fill_between(train_sizes, mean + std, mean - std, alpha=0.15, color=color)

plt.legend(loc='upper right')
plt.grid()
plt.show()

# %% [markdown]
# 这么看是正常的：训练集的方差越来越大，验证集的方差越来越小，说明随着训练样本增加，过拟合确实在逐渐减轻。
# 但离谱的是。。。为什么训练集的方差比验证集大？

# %%
# Repeat the learning-curve experiment with a different shuffle seed (42)
# to check whether the train/valid variance pattern is seed-dependent.
model = Lasso(alpha=0.1)
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=42)
train_sizes, train_scores, valid_scores = learning_curve(
    model, X, y,
    train_sizes=np.linspace(0.1, 1.0, 10),  # 10% .. 100% of the training data
    cv=cv,
    scoring='neg_mean_squared_error',
    n_jobs=-1
)

# Convert negative-MSE means back to MSE; negation leaves the std unchanged.
train_mean, train_std = -np.mean(train_scores, axis=1), np.std(train_scores, axis=1)
valid_mean, valid_std = -np.mean(valid_scores, axis=1), np.std(valid_scores, axis=1)

plt.figure(figsize=(10, 6))
plt.title('Lasso Learn route')
plt.xlabel('Train sample size')
plt.ylabel('MSE')

# Each curve gets a line plus a +/- 1 std shaded band.
for mean, std, color, marker, label in (
    (train_mean, train_std, 'blue', 'o', 'Train MSE'),
    (valid_mean, valid_std, 'green', 's', 'Valid MSE'),
):
    plt.plot(train_sizes, mean, color=color, marker=marker, markersize=5, label=label)
    plt.fill_between(train_sizes, mean + std, mean - std, alpha=0.15, color=color)

plt.legend(loc='upper right')
plt.grid()
plt.show()

# %%
# Third run of the same learning-curve experiment with shuffle seed 88.
model = Lasso(alpha=0.1)
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=88)
train_sizes, train_scores, valid_scores = learning_curve(
    model, X, y,
    train_sizes=np.linspace(0.1, 1.0, 10),  # 10% .. 100% of the training data
    cv=cv,
    scoring='neg_mean_squared_error',
    n_jobs=-1
)

# Back to MSE units; std of the negated scores equals std of the originals.
train_mean, train_std = -np.mean(train_scores, axis=1), np.std(train_scores, axis=1)
valid_mean, valid_std = -np.mean(valid_scores, axis=1), np.std(valid_scores, axis=1)

plt.figure(figsize=(10, 6))
plt.title('Lasso Learn route')
plt.xlabel('Train sample size')
plt.ylabel('MSE')

# Line + shaded +/- 1 std band for each curve.
for mean, std, color, marker, label in (
    (train_mean, train_std, 'blue', 'o', 'Train MSE'),
    (valid_mean, valid_std, 'green', 's', 'Valid MSE'),
):
    plt.plot(train_sizes, mean, color=color, marker=marker, markersize=5, label=label)
    plt.fill_between(train_sizes, mean + std, mean - std, alpha=0.15, color=color)

plt.legend(loc='upper right')
plt.grid()
plt.show()
