from sklearn.preprocessing import MinMaxScaler
import pandas as pd  # data processing, CSV file I/O
import numpy as np  # 矩阵操作
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import week4Base4_RidgeCV_visualization_weightCoef
import week4Base4_LineRegres
import week4Base4_Lasso
# Load the raw Boston housing data set.
df = pd.read_csv("boston_housing.csv")

# Inspect the first 5 rows to get an overview of each column (feature).
print(df.head())
# Separate the input features X and the target y from the raw data.
y = df['MEDV']
X = df.drop('MEDV', axis=1)

# Also try a log transform of y (house price) and estimate the transformed
# price; log1p = log(1 + y) is safe for zero values.
log_y = np.log1p(y)

# RAD is an index of accessibility to radial highways. Although stored as a
# number it is really a categorical index, so one-hot encode it instead.
# (The original `X["RAD"].astype("object")` discarded its result — a no-op —
# and was removed; get_dummies treats the column as categorical regardless.)
X_cat = pd.get_dummies(X["RAD"], prefix="RAD")
X = X.drop("RAD", axis=1)

# Feature names, used to label the saved feature-engineering results.
feat_names = X.columns
# Separate scalers for the features and for each target variant.
ss_X = MinMaxScaler()
ss_y = MinMaxScaler()

ss_log_y = MinMaxScaler()

# Scale the features and targets to [0, 1].
# fit_transform learns the scaling parameters and applies them in one step.
X = ss_X.fit_transform(X)

# Scaling y keeps the weight magnitudes comparable across problems and also
# bounds the useful range of the regularization parameter.
y = ss_y.fit_transform(y.values.reshape(-1, 1))
# BUG FIX: the log target must use its own scaler. The original code called
# ss_y.fit_transform here, refitting ss_y on log_y and clobbering the
# parameters it had just learned on y (ss_log_y was created but never used).
log_y = ss_log_y.fit_transform(log_y.values.reshape(-1, 1))
fe_data = pd.DataFrame(data=X, columns=feat_names, index=df.index)
fe_data = pd.concat([fe_data, X_cat], axis=1, ignore_index=False)

# Append the (scaled) targets; ravel() flattens the (n, 1) arrays so pandas
# receives 1-D column data (2-D assignment is rejected by newer pandas).
fe_data["MEDV"] = y.ravel()
fe_data["log_MEDV"] = log_y.ravel()

# Save the feature-engineering result to file.
fe_data.to_csv('FE_boston_housingMinMax.csv', index=False)
print(fe_data.head())
print('The feature date info is')
fe_data.info()

# Read the feature-engineered data back in from disk.
df_minmax = pd.read_csv("FE_boston_housingMinMax.csv")

# Quick look at the first rows, kept for debugging.
#print(df_minmax.head())
# Pull out the scaled target and drop both target columns from the features.
medv_target = df_minmax["MEDV"]
feature_frame = df_minmax.drop(columns=["MEDV", "log_MEDV"])

# Feature names, later used to label the learned weight coefficients.
feat_names1 = feature_frame.columns
# Reserve a random 20% of the samples for testing; the rest is for training.
X_train, X_test, y_train, y_test = train_test_split(
    feature_frame, medv_target, test_size=0.2, random_state=33
)
#print(X_train.shape)

# Fit a plain linear regression; get its weights and training predictions.
lrCoef, y_train_pred_lr = week4Base4_LineRegres.lineRegres(X_train, y_train, X_test, y_test)

# Plot the training residuals: under the model assumptions they should look
# like zero-mean Gaussian noise.
fig, axis = plt.subplots(figsize=(7, 5))
fig.tight_layout()
residuals = y_train - y_train_pred_lr
axis.hist(residuals, bins=40, label='Residuals Linear', color='b', alpha=.5)
axis.set_title("Histogram of Residuals")
axis.legend(loc='best')

# Cross-validated ridge regression: per-alpha mean MSE, the alpha grid, the
# selected alpha and the fitted weight coefficients.
ridge_mses, ridge_alphas, ridgealpha, ridgeCoef = week4Base4_RidgeCV_visualization_weightCoef.visual_weightCoef(
    X_train, y_train, X_test, y_test
)
plt.figure(2)
plt.plot(np.log10(ridge_alphas), ridge_mses.reshape(len(ridge_alphas), 1))
plt.xlabel('log(alpha)')
plt.ylabel('ridgemse')

print('ridgealpha is:', ridgealpha)

# Lasso path: MSE per alpha, the alpha grid, and the fitted coefficients.
lasso_mses, lasso_alphas, lassoCoef = week4Base4_Lasso.lassoTest(X_train, y_train, X_test, y_test)
print(lasso_alphas[-1])
plt.figure(3)
plt.plot(np.log10(lasso_alphas), lasso_mses)
plt.xlabel('log(alpha)')
plt.ylabel('lassoMses')

print('lassoalpha is:', lasso_alphas)


# Compare the weight coefficients of the three models; a coefficient's
# absolute value can be read as that feature's importance.
coef_table = {
    "columns": list(feat_names1),
    "coef_lr": list(lrCoef.T),
    "coef_ridge": list(ridgeCoef.T),
    "coef_lasso": list(lassoCoef.T),
}
fs = pd.DataFrame(coef_table)
print(fs.sort_values(by=['coef_lr'], ascending=False))
plt.show()