import os
import pickle
import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.preprocessing import OneHotEncoder, StandardScaler

# Show every DataFrame column and keep wide frames on a single line.
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)

# Shared subplot grid for all figures below: 2 rows x 3 columns.
spr = 2  # subplot grid rows
spc = 3  # subplot grid columns
spn = 0  # index of the next subplot slot to draw into
plt.figure(figsize=[12, 8])


def sep(label=''):
    """Print a section separator: 32 dashes, the label, 32 dashes."""
    bar = '-' * 32
    print(f'{bar}{label}{bar}')

# 2. Complete the following processing with 202103.csv (35 pts)
# (1) Data processing
# (1)-1 Load the data and drop the `instant` column (1 pt)
sep('①获取数据，将instant删除')
df = pd.read_csv(r'../../../../../large_data/ML2/202103.csv',
                 index_col=0)
m = len(df)
# Shuffle rows with a fixed seed so every run sees the same ordering.
np.random.seed(666)
df = df.iloc[np.random.permutation(m)]
df.drop(columns='instant', inplace=True)
print(df.head())

# (1)-2 Print the columns containing missing values and their missing ratio (2 pts)
sep('②打印含有缺失列及其缺失比例')
na_counts = df.isnull().sum(axis=0)
has_na = na_counts > 0
print(f'含有缺失列:{df.columns[has_na]}')
print(f'其缺失比例: {na_counts[has_na] / m}')

# (1)-3 Create a new column `cnt` = casual + registered, then drop both (1 pt)
sep('③创建新的一列cnt，数值为casual和registered的加和，删除掉casual和registered列信息')
# Plain `+` keeps NaN propagation identical to the original element-wise add.
df['cnt'] = df['casual'] + df['registered']
df.drop(columns=['casual', 'registered'], inplace=True)
print(df.head())

# (2) Missing-value handling
# (2)-1 Treat rows with missing `holiday` as the prediction (test) set,
#       the rest as the training set (2 pts)
sep('①使用缺失值预测方式，数据切分训练集和测试集')
idx_test = df['holiday'].isnull()
df_train = df.loc[~idx_test]
df_test = df.loc[idx_test]
print(f'df_train: {df_train.shape}')
print(f'df_test: {df_test.shape}')

# (2)-2 Use season, mnth, weekday and cnt as the predictive features (1 pt)
feature_cols = ['season', 'mnth', 'weekday', 'cnt']
x_train = df_train[feature_cols]
x_test = df_test[feature_cols]
y_train = df_train['holiday']

# Cache the grid-search results so reruns skip the expensive CV fits.
ver = 'v1.0'
path = sys.argv[0] + '_' + ver + '.tmp.dat'
if os.path.exists(path):
    with open(path, 'br') as f:
        # NOTE(review): unpickling is only safe because this cache file is
        # written by this very script; never unpickle untrusted data.
        best_score, best_estimator, best_params = pickle.load(f)
        sep(f'Loaded cached data for version {ver}')
else:
    best_score = None       # best CV score seen so far
    best_estimator = None   # class of the best model
    best_params = None      # its best hyper-parameters

    # (2)-3 Logistic regression with cross-validation; report the score (1 pt)
    sep('③使用逻辑回归，配合交叉验证，参数自定，验证模型评分效果')
    estimator = LogisticRegression()
    params = dict(solver=['liblinear'],
                  multi_class=['auto'],
                  penalty=['l1', 'l2'],
                  C=[0.1, 0.5, 1, 5, 10],
                  )
    # BUG FIX: `iid=True` was deprecated in scikit-learn 0.22 and removed in
    # 0.24; passing it raises TypeError on modern versions, so it is dropped.
    grid = GridSearchCV(estimator, params, cv=5)
    grid.fit(x_train, y_train)
    score = grid.best_score_
    params = grid.best_params_
    print(f'Best score = {score}')
    print(f'Best params = {params}')
    if best_score is None or score > best_score:
        best_score = score
        best_estimator = estimator.__class__
        best_params = params

    # (2)-4 KNN with cross-validation; report the score (1 pt)
    sep('④使用knn，配合交叉验证，参数自定，验证模型评分效果')

    estimator = KNeighborsClassifier()
    params = dict(n_neighbors=[5, 10, 20, 30],
                  p=[1, 2],
                  )
    grid = GridSearchCV(estimator, params, cv=5)  # iid removed (see above)
    grid.fit(x_train, y_train)
    score = grid.best_score_
    params = grid.best_params_
    print(f'Best score = {score}')
    print(f'Best params = {params}')
    if best_score is None or score > best_score:
        best_score = score
        best_estimator = estimator.__class__
        best_params = params

    with open(path, 'bw') as f:
        pickle.dump([best_score, best_estimator, best_params], f)
print(f'Best model: {best_estimator}')
print(f'Best score: {best_score}')
print(f'Best params: {best_params}')

# (2)-5 Use the higher-scoring model to predict and fill the missing values (4 pts)
sep('⑤将模型得分更高的模型作为预测模型，将缺失值进行填充处理')
imputer = best_estimator(**best_params)
imputer.fit(x_train, y_train)
predicted_holiday = imputer.predict(x_test)
df.loc[idx_test, 'holiday'] = predicted_holiday
print(df.info())

# (3) Data analysis
# (3)-1 Visualise the noisy samples of `cnt` with a box plot (1 pt)
spn += 1
ax = plt.subplot(spr, spc, spn)
ax.set_title('Noise data of cnt')
sns.boxplot(data=df, y='cnt', ax=ax)

# (3)-2 Remove the noisy `cnt` samples via the 3-sigma rule (3 pts)
sep('②将cnt中的噪音样本从数据集中去除')
print(df.shape)
mean_cnt = df['cnt'].mean()
std_cnt = df['cnt'].std()
# Keep the `> threshold` + `~` form so any NaN rows are retained,
# exactly as before.
is_noise = (df['cnt'] - mean_cnt).abs() > 3 * std_cnt
df = df.loc[~is_noise]
print(df.shape)

# (3)-3 Plot the relationship between holiday and cnt (1 pt)
spn += 1
ax = plt.subplot(spr, spc, spn)
ax.set_title('holiday - cnt')
# NOTE(review): `ci=None` is deprecated in seaborn >= 0.12 (errorbar=None is
# the replacement); kept as-is for the seaborn version this script targets.
sns.barplot(data=df, x='holiday', y='cnt', estimator=np.sum, ci=None, ax=ax)

# (3)-4 Correlation heat map of the continuous features (3 pts)
df_conti = df[['temp', 'atemp', 'hum', 'windspeed']]
xcorr = df_conti.corr()
spn += 1
ax = plt.subplot(spr, spc, spn)
plt.title('Heatmap of continuous data')  # BUG FIX: title typo "Heapmap"
sns.heatmap(data=xcorr, annot=True, ax=ax)
# Workaround for the matplotlib 3.1.1 regression that clips the top and
# bottom heatmap rows in half: expand the y-limits by half a cell each way.
top, bottom = ax.get_ylim()
ax.set_ylim(top + 0.5, bottom - 0.5)

# (3)-5 For feature pairs with correlation > 0.6, drop one of them (2 pts)
# temp/atemp are highly correlated; keep temp, drop atemp.
del df['atemp']

# (4) Data processing
# (4)-2 Scale the continuous features, one-hot encode the discrete ones (1 pt)
# dteday  season  yr  mnth  hr  holiday  weekday  workingday  weathersit  temp   hum  windspeed  cnt
sep('②将连续型数据进行特征缩放，离散数据进行独热处理')
y = df[['cnt']]
x1 = df[['holiday', 'weekday', 'workingday', 'weathersit']]
x2 = df[['temp', 'hum', 'windspeed']]
# Imports hoisted to the top of the file (PEP 8) — see the import block.
oh = OneHotEncoder(categories='auto')
x1 = oh.fit_transform(x1)
# Separate scaler instances so each keeps its own fitted statistics (the
# original refit one instance, discarding the target's parameters).
std_y = StandardScaler()
y = std_y.fit_transform(y).ravel()
std_x = StandardScaler()
x2 = std_x.fit_transform(x2)
# BUG FIX: the sparse-matrix `.A` shorthand is deprecated/removed in recent
# SciPy; `toarray()` is the supported equivalent.
x = np.c_[x1.toarray(), x2]
# (4)-1 Split the data into training and testing sets (1 pt)
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, random_state=666)

# Cache the regression grid-search results so reruns skip the CV fits.
ver = 'p2_v1.0'
path = sys.argv[0] + '_' + ver + '.tmp.dat'
if os.path.exists(path):
    with open(path, 'br') as f:
        # NOTE(review): safe only because this cache is self-produced.
        best_score, best_estimator, best_params, model_arr = pickle.load(f)
        sep(f'Loaded cached data for version {ver}')
else:
    best_score = None
    best_estimator = None
    best_params = None
    model_arr = []  # [class, best_params] for every model, for the curves below

    # (4)-3 KNN regressor with cross-validation; find the best params (1 pt)
    sep('③创建knn模型，配合交叉验证，找出最优参数')
    estimator = KNeighborsRegressor()
    params = dict(n_neighbors=[5, 10, 20, 30],
                  p=[1, 2],)
    # BUG FIX: `iid=True` was deprecated in scikit-learn 0.22 and removed in
    # 0.24; passing it raises TypeError on modern versions, so it is dropped.
    grid = GridSearchCV(estimator, params, cv=5)
    grid.fit(x_train, y_train)
    score = grid.best_score_
    params = grid.best_params_
    print(f'Best score = {score}')
    print(f'Best params = {params}')
    model_arr.append([estimator.__class__, params])
    if best_score is None or score > best_score:
        best_score = score
        best_estimator = estimator.__class__
        best_params = params

    # (4)-4 L2 (ridge) model with cross-validation; find the best params (1 pt)
    estimator = Ridge()
    params = dict(alpha=[0.1, 0.5, 1, 5, 10],
                  )
    grid = GridSearchCV(estimator, params, cv=5)  # iid removed (see above)
    grid.fit(x_train, y_train)
    score = grid.best_score_
    params = grid.best_params_
    print(f'Best score = {score}')
    print(f'Best params = {params}')
    model_arr.append([estimator.__class__, params])
    if best_score is None or score > best_score:
        best_score = score
        best_estimator = estimator.__class__
        best_params = params

    with open(path, 'bw') as f:
        pickle.dump([best_score, best_estimator, best_params, model_arr], f)
print(f'Best score: {best_score}')
print(f'Best params: {best_params}')
print(f'Best estimator: {best_estimator}')

# (4)-5 Plot the learning curves of both models (5 pts)
train_sizes = np.linspace(0.1, 1, 6)
# Cache the learning-curve data, too — computing it runs many CV fits.
ver = 'p3_v1.0'
path = sys.argv[0] + '_' + ver + '.tmp.dat'
if os.path.exists(path):
    with open(path, 'br') as f:
        curve_data_arr = pickle.load(f)
        sep(f'Loaded cached data for version {ver}')
else:
    curve_data_arr = []
    for model_cls, params in model_arr:
        model = model_cls(**params)
        sizes_abs, tr_scores, te_scores = learning_curve(model, x, y, train_sizes=train_sizes, cv=5)
        curve_data_arr.append([sizes_abs, tr_scores, te_scores, model])
    with open(path, 'bw') as f:
        pickle.dump(curve_data_arr, f)
    sep('Get learning curve data and cache it.')
# One subplot per model: mean train/test score with +/- 1 std bands.
for sizes_abs, tr_scores, te_scores, model in curve_data_arr:
    spn += 1
    ax = plt.subplot(spr, spc, spn)
    ax.set_title(f'{model.__class__.__name__}')
    tr_mean = tr_scores.mean(axis=1)
    te_mean = te_scores.mean(axis=1)
    tr_std = tr_scores.std(axis=1)
    te_std = te_scores.std(axis=1)
    ax.plot(sizes_abs, tr_mean, color='b', label='train')
    ax.plot(sizes_abs, te_mean, color='r', label='test')
    ax.fill_between(sizes_abs, tr_mean - tr_std, tr_mean + tr_std, color='b', alpha=0.5)
    ax.fill_between(sizes_abs, te_mean - te_std, te_mean + te_std, color='r', alpha=0.5)
    ax.legend()


# (5) Model evaluation
# (5)-1 Which model works better, knn or l2? (explain) (2 pts)
# A: The knn model performs better — its score clearly improves as the number
#    of samples grows, whereas adding samples does not help the l2 (ridge)
#    model at all.
# (5)-2 Print the R2 score on the test set (1 pt)
sep('②打印模型测试集的R2结果')
model = best_estimator(**best_params)
# BUG FIX: the model must be trained on the TRAINING split. The original
# called model.fit(x_test, y_test) and then scored on the same data, which
# leaks the test set and makes the reported R2 meaningless.
model.fit(x_train, y_train)
h_test = model.predict(x_test)
# r2_score import hoisted to the top of the file.
print(f'测试集的R2结果: {r2_score(y_test, h_test)}')

# Finally show all plots.
plt.show()
