import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings

# Display config: show every column and do not wrap wide frames;
# silence library warnings for cleaner exam output.
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
warnings.filterwarnings('ignore')


def sep(label=''):
    """Print a section divider: 32 dashes, the label, 32 dashes."""
    print(f"{'-' * 32}{label}{'-' * 32}")


# 1. Load the aviation data set with MEMBER_NO as the index column.
sep('读入aviation数据集，设置MEMBER_NO为索引列')
df = pd.read_csv(r'../../../../../large_data/ML2/air_data.csv',
                 index_col='MEMBER_NO')
print(df.shape)

# 2. Remove duplicate rows, then rows containing missing values.
sep('剔除重复值、缺失值')
df = df.drop_duplicates().dropna()
print(df.shape)

# 3. Randomly sample 500 rows; slice features X and label Y.
sep('随机抽取500样本，切片特征X和标签Y')
np.random.seed(666)
m, n = df.shape
rand_idx = np.random.permutation(m)[:500]
df = df.iloc[rand_idx]
# Label: membership tenure = LOAD_TIME - FFP_DATE, in whole days.
df['FFP_DATE'] = pd.to_datetime(df['FFP_DATE'])
df['LOAD_TIME'] = pd.to_datetime(df['LOAD_TIME'])
# `.dt.days` replaces the fragile `astype(np.int64) // (3600 * 24 * 1e9)`
# nanosecond arithmetic: it yields integer days directly instead of a
# float and does not depend on the timedelta's internal representation.
y = (df['LOAD_TIME'] - df['FFP_DATE']).dt.days
print(df[:5][['FFP_DATE', 'LOAD_TIME']])
print(y[:5])
# Positive class: tenure longer than five years.
y = (y > 365 * 5).astype(np.int64)
print(y[:5])
# Features: numeric columns only. select_dtypes states the intent directly;
# `df[df.describe().columns]` relied on describe() defaulting to numeric.
x = df.select_dtypes(include=np.number)
print(x.shape)

# 4. Compare logistic regression and decision tree with 10-fold CV; metric: F1.
sep('使用交叉验证方法（10折）比较逻辑回归、决策树算法性能差异，评估指标用F1分数')
from sklearn.model_selection import cross_val_score
from sklearn.metrics import f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier


def scorer(estimator, x, y):
    """F1 scorer callable for cross_val_score / GridSearchCV.

    Called by sklearn as scorer(estimator, X_test, y_test); returns the
    F1 score of the estimator's predictions on (x, y).
    """
    return f1_score(y, estimator.predict(x))


# Shared logistic-regression settings, reused in steps 5 and 6.
fixed_params = dict(solver='liblinear',
                    multi_class='auto',
                    max_iter=300,
                    )
estimator01 = LogisticRegression(**fixed_params)
estimator02 = DecisionTreeClassifier()
# Same 10-fold CV protocol for both candidates; report mean +/- spread.
for estimator in (estimator01, estimator02):
    score = cross_val_score(estimator, x, y, scoring=scorer, cv=10)
    print(f'Score mean: {score.mean()}, score std: {score.std()}')

# 5. Grid-search the hyper-parameters of the better (higher-F1) algorithm.
sep('5.	使用网格搜索对上题中F1分数较高的算法进行超参数调优')
from sklearn.model_selection import GridSearchCV
import os
import sys
import pickle

ver = 'v1.0'
# Cache the fitted grid beside the script so reruns skip the search.
path = sys.argv[0] + '_' + ver + '.tmp.dat'
if os.path.exists(path):
    # NOTE(review): unpickling is only safe because this file is produced
    # by this very script; never pickle.load untrusted data.
    with open(path, 'rb') as f:
        grid = pickle.load(f)
    print('LOADED')
else:
    params = dict(penalty=['l1', 'l2'],
                  C=[0.1, 1, 5], )
    # `iid=True` was removed in scikit-learn 0.24 and raises TypeError on
    # current versions, so it is dropped. Tune with the same F1 scorer used
    # in step 4 so the search optimizes the metric we compared on.
    grid = GridSearchCV(estimator01, params, cv=5, scoring=scorer)
    grid.fit(x, y)
    with open(path, 'wb') as f:
        pickle.dump(grid, f)
    print('SAVED')
print(f'Best score: {grid.best_score_}')
print(f'Best params: {grid.best_params_}')

# 6. Build the model from the best algorithm (step 4) and the tuned
#    hyper-parameters (step 5). Fixed and tuned keys are disjoint
#    (solver/multi_class/max_iter vs penalty/C).
best_params = dict(fixed_params)
best_params.update(grid.best_params_)
model = LogisticRegression(**best_params)

# 7. Split the data 6:4 (train:test), with a fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.6, random_state=666)

# 8. Fit on the training split, predict the test split, print the confusion matrix.
sep('8.	使用训练集数据进行模型训练，对测试集数据进行预测，打印混淆矩阵')
from sklearn.metrics import confusion_matrix
model.fit(x_train, y_train)
# Report mean accuracy on both splits.
for split_name, xs, ys in (('Training', x_train, y_train),
                           ('Testing', x_test, y_test)):
    print(f'{split_name} score = {model.score(xs, ys)}')
# Hard predictions and class probabilities, reused by steps 9 and 10.
h_test = model.predict(x_test)
proba_test = model.predict_proba(x_test)
print('混淆矩阵')
print(confusion_matrix(y_test, h_test))

# 9. Print precision, recall, F1 and AUC.
sep('9.	打印精确率、召回率、F1分数和AUC值')
from sklearn.metrics import (accuracy_score, precision_score, recall_score,
                             f1_score, roc_curve, roc_auc_score)
# BUG FIX: 精确率 means *precision*, but the original printed accuracy_score.
print(f'精确率:{precision_score(y_test, h_test)}')
print(f'召回率:{recall_score(y_test, h_test)}')
print(f'F1分数:{f1_score(y_test, h_test)}')
# AUC is computed from the positive-class probability, not hard labels.
print(f'AUC值:{roc_auc_score(y_test, proba_test[:, 1])}')

# 10. Plot the ROC curve with a handful of threshold annotations.
sep('10.	画出ROC曲线')
fpr, tpr, thre = roc_curve(y_test, proba_test[:, 1])
plt.plot(fpr, tpr)
plt.xlabel('FPR')
plt.ylabel('TPR')
thre_len = len(thre)
# Annotate roughly 8 evenly spaced thresholds. max(1, ...) guards against
# a ZeroDivisionError when roc_curve returns fewer than 8 thresholds
# (the original `thre_len // 8` could be 0).
group = max(1, thre_len // 8)
for i, th in enumerate(thre):
    if i % group == 0:
        plt.annotate(f'{th:.2f}', xy=[fpr[i], tpr[i]])

# finally show all drawings
plt.show()