import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor

# Configure matplotlib to render CJK glyphs (SimHei) and widen pandas output.
plt.rcParams['font.sans-serif'] = ['SimHei']
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)


def sep(label=''):
    """Print a horizontal separator line with an optional embedded label."""
    bar = '-' * 32
    print(f'{bar}{label}{bar}')


# 3. Handle missing values in the Titanic dataset (exam question, 25 pts).
# (1) Data processing
# (1)-1 Load the data and inspect which columns contain missing values.
sep('①    读取数据，查看数据中的缺失列')
df = pd.read_csv(r'../../../large_data/ML2/monthly_exam/train.csv')
m, n = df.shape
print(m, n)
# Shuffle the rows with a fixed seed so every run is reproducible.
np.random.seed(666)
df = df.iloc[np.random.permutation(m)]
print(df[:5])
print(df.info())

# (1)-2 Print the fraction of missing values per column.
sep('②    打印缺失数据的比例')
print(df.isna().sum() / m)

# Shared subplot-grid bookkeeping for the figures below.
spr = 1  # subplot rows
spc = 2  # subplot columns
spn = 0  # subplot number (incremented before each plot)
plt.figure(figsize=(12, 6))

# (2) Visual analysis
# (2)-1 Survival counts per ticket class.
spn += 1
ax = plt.subplot(spr, spc, spn)
ax.set_title('各船票等级的获救情况')
df['获救情况'] = df['Survived'].map({1: '获救', 0: '未获救'})
sns.countplot(data=df, x='Pclass', hue='获救情况', ax=ax)
ax.set_xlabel('船票等级')
ax.set_ylabel('人数')

# (2)-2 Survival counts per sex.
spn += 1
ax = plt.subplot(spr, spc, spn)
ax.set_title('性别获救情况')
sns.countplot(data=df, x='Sex', hue='获救情况', ax=ax)
ax.set_xlabel('性别')
ax.set_ylabel('人数')

# (3) Feature engineering
# (3)-1 Combine SibSp and Parch into a single family-size feature.
df['家庭规模'] = df['SibSp'] + df['Parch']

# (3)-2 One-hot encode ticket class, sex and family size.
sep('②    将Pclass、Sex、家庭规模进行独热处理')
onehot_cols = ['Pclass', 'Sex', '家庭规模']
dfx = pd.get_dummies(df[onehot_cols], columns=onehot_cols)
print(dfx[:5])

# (4) Fill missing Age values with a regression model.
# (4)-1 Features are the three one-hot fields above; Age is the label.
x = dfx
y = df['Age']

# (4)-2 Rows with a known Age form the training set; rows with a
# missing Age form the test set whose Age will be predicted.
sep('②    Age不缺失的作为训练集，缺失数据作为测试集')
idx_age_na = y.isna()
idx_age_ok = ~idx_age_na
x_train, y_train = x[idx_age_ok], y[idx_age_ok]
x_test = x[idx_age_na]
print(f'x_train: {x_train.shape}')
print(f'x_test: {x_test.shape}')
print(f'y_train: {y_train.shape}')

# Trackers for the best model found across the grid searches below.
best_score = best_cls = best_params = None

# (4)-3 KNN regressor with grid-searched n_neighbors (5-fold CV).
sep('③    使用KNN配合网格搜索交叉验证')
estimator = KNeighborsRegressor()
params = dict(n_neighbors=[20, 30, 40])
# The deprecated `iid` argument was removed in scikit-learn 0.24; passing
# it raises TypeError on modern versions, so it is no longer supplied.
grid = GridSearchCV(estimator, params, cv=5)
grid.fit(x_train, y_train)
print(f'{estimator.__class__} best score: {grid.best_score_}')
print(f'{estimator.__class__} best params: {grid.best_params_}')
# Keep whichever estimator scores best across all searches.
if best_score is None or grid.best_score_ > best_score:
    best_score = grid.best_score_
    best_cls = estimator.__class__
    best_params = grid.best_params_

# (4)-4 Decision-tree regressor with grid-searched max_depth (5-fold CV).
sep('④    使用决策树配合网格搜索交叉验证')
estimator = DecisionTreeRegressor()
params = dict(max_depth=[5, 10, 20])
# The deprecated `iid` argument was removed in scikit-learn 0.24; passing
# it raises TypeError on modern versions, so it is no longer supplied.
grid = GridSearchCV(estimator, params, cv=5)
grid.fit(x_train, y_train)
print(f'{estimator.__class__} best score: {grid.best_score_}')
print(f'{estimator.__class__} best params: {grid.best_params_}')
# Keep whichever estimator scores best across all searches.
if best_score is None or grid.best_score_ > best_score:
    best_score = grid.best_score_
    best_cls = estimator.__class__
    best_params = grid.best_params_

# (4)-5 Report the winning hyper-parameters and model class.
sep('⑤    使用上两问的数据，打印最优参数和最后模型')
print(f'最优参数: {best_params}')
print(f'最后模型: {best_cls}')

# (4)-6 Refit the winning model on the known-Age rows and use its
# predictions to fill in the missing Age entries.
sep('⑥    使用最优参数和最优模型，训练训练数据，将测试预测结果作为age的缺失值进行填充，补全缺失值')
best_model = best_cls(**best_params)
best_model.fit(x_train, y_train)
print(f'操作前Age缺失量：{df["Age"].isnull().sum()}')
df.loc[idx_age_na, 'Age'] = best_model.predict(x_test)
print(f'操作后Age缺失量：{df["Age"].isnull().sum()}')

# Render all figures.
plt.show()
