import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Display settings: show every column and let wide frames print on one line.
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)


def sep(label=''):
    """Print *label* framed by 32 dashes on each side (section separator)."""
    bar = '-' * 32
    print(f'{bar}{label}{bar}')


# 现有文件名为aviation的航空客运信息数据集，共包括5000个样本，每个样本有55个属性，其中runoff_flag代表是否流失，
# 要求通过这些数据构建客户流失预警模型，而且由于营销资源有限，希望结合客户特征进行有针对性的、高效率的开展客户挽留。
# 具体任务如下：
# 1.	读入aviation数据集，设置MEMBER_NO为索引列。（10分）
# Task 1: load the aviation dataset with MEMBER_NO as the index column.
sep('1.	读入aviation数据集，设置MEMBER_NO为索引列')
data_path = r'../../../large_data/ML2/aviation.xls'
df = pd.read_excel(data_path, index_col='MEMBER_NO')
# DAYS_FROM_LAST_TO_END  DAYS_FROM_BEGIN_TO_FIRST  FFP_TIER   age  FLIGHT_COUNT  FLIGHT_COUNT_QTR_1
# FLIGHT_COUNT_QTR_2  FLIGHT_COUNT_QTR_3  FLIGHT_COUNT_QTR_4  FLIGHT_COUNT_QTR_5  FLIGHT_COUNT_QTR_6
# FLIGHT_COUNT_QTR_7  FLIGHT_COUNT_QTR_8  BASE_POINTS_SUM  BASE_POINTS_SUM_QTR_1  BASE_POINTS_SUM_QTR_2
# BASE_POINTS_SUM_QTR_3  BASE_POINTS_SUM_QTR_4  BASE_POINTS_SUM_QTR_5  BASE_POINTS_SUM_QTR_6  BASE_POINTS_SUM_QTR_7
# BASE_POINTS_SUM_QTR_8  ELITE_POINTS_SUM_YR_1  ELITE_POINTS_SUM_YR_2  EXPENSE_SUM_YR_1  EXPENSE_SUM_YR_2  SEG_KM_SUM
# WEIGHTED_SEG_KM  AVG_FLIGHT_COUNT  AVG_BASE_POINTS_SUM  AVG_FLIGHT_INTERVAL  MAX_FLIGHT_INTERVAL  MILEAGE_IN_COUNT
# ADD_POINTS_SUM_YR_1  ADD_POINTS_SUM_YR_2  EXCHANGE_COUNT  Avg_Discount  P1Y_Flight_Count  L1Y_Flight_Count
# P1Y_BASE_POINTS_SUM  L1Y_BASE_POINTS_SUM  ELITE_POINTS_SUM  ADD_POINTS_SUM  Eli_Add_Point_Sum  L1Y_ELi_Add_Points
# Points_Sum  L1Y_Points_Sum  Ration_L1Y_Flight_Count  Ration_P1Y_Flight_Count  Ration_P1Y_BPS  Ration_L1Y_BPS
# Point_Chg_NotFlight  FFP_DAYS  runoff_flag

# Task 2: drop duplicate rows and rows containing missing values. (10 pts)
sep('2.	剔除重复值、缺失值')
print(df.shape)
# Chained, non-inplace form: drop exact duplicate rows, then any row with NaNs.
df = df.drop_duplicates().dropna()
print(df.shape)

# Task 3: randomly sample 500 rows, then slice features X and label Y. (10 pts)
sep('3.	随机抽取500样本，切片特征X和标签Y')
# Fixed seed so the same 500 rows are drawn on every run.
np.random.seed(666)
sample_idx = np.random.permutation(len(df))[:500]
df = df.iloc[sample_idx]
# All columns but the last are features; the last column (runoff_flag) is the label.
x, y = df.iloc[:, :-1], df.iloc[:, -1]

# Task 4: compare logistic regression vs decision tree with 10-fold CV, F1 metric. (10 pts)
sep('4.	使用交叉验证方法（10折）比较逻辑回归、决策树算法性能差异，评估指标用F1分数')
from sklearn.model_selection import cross_val_score
from sklearn.metrics import f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier


def x_f1_scorer(estimator, x, y):
    """Custom CV scorer: F1 score of *estimator*'s predictions on (x, y)."""
    return f1_score(y, estimator.predict(x))


# Fixed hyperparameters for the logistic-regression baseline.
# NOTE: `multi_class` is intentionally NOT passed — it was deprecated in
# scikit-learn 1.5 and removed in 1.7, and 'auto' was already the default,
# so omitting it keeps the same behavior while staying forward-compatible.
fixed_params01 = dict(solver='liblinear',
                      max_iter=500,
                      )
estimator01 = LogisticRegression(**fixed_params01)
score_list = cross_val_score(estimator01, x, y, scoring=x_f1_scorer, cv=10)
score_mean = score_list.mean()
print(f'Mean score: {score_mean}, std: {score_list.std()}')
# Mean score: 0.9809266409266411, std: 0.02438015288351396

# Decision-tree baseline with default hyperparameters.
estimator02 = DecisionTreeClassifier()
score_list = cross_val_score(estimator02, x, y, scoring=x_f1_scorer, cv=10)
score_mean = score_list.mean()
print(f'Mean score: {score_mean}, std: {score_list.std()}')
# Mean score: 0.9458350658350658, std: 0.02725638852141435

# Task 5: grid-search tuning for the better model from task 4 (logistic regression). (10 pts)
sep('5.	使用网格搜索对上题中F1分数较高的算法进行超参数调优')
from sklearn.model_selection import GridSearchCV

# liblinear supports both L1 and L2 penalties, so both can be searched.
params = dict(penalty=['l1', 'l2'], C=[0.1, 1, 2, 5])
# BUG FIX: the `iid` argument was deprecated in scikit-learn 0.22 and removed
# in 0.24; passing `iid=True` raises TypeError on any modern version, so it
# is omitted (modern GridSearchCV always uses the plain mean of fold scores).
grid = GridSearchCV(estimator01, params, cv=5, scoring=x_f1_scorer)
grid.fit(x, y)
print(f'Best score: {grid.best_score_}')
print(f'Best params: {grid.best_params_}')

# Task 6: build the final model from the best algorithm (task 4) and the
# best hyperparameters found by the grid search (task 5). (10 pts)
best_params = {**fixed_params01, **grid.best_params_}
model = LogisticRegression(**best_params)

# 7.	按照6：4划分数据集。（10分）
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.6, random_state=666)

# 8.	使用训练集数据进行模型训练，对测试集数据进行预测，打印混淆矩阵。（10分）
sep('8.	使用训练集数据进行模型训练，对测试集数据进行预测，打印混淆矩阵')
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, roc_auc_score, roc_curve
model.fit(x_train, y_train)
h_test = model.predict(x_test)
proba_test = model.predict_proba(x_test)[:, 1]  # ATTENTION
print('混淆矩阵')
print(confusion_matrix(y_test, h_test))

# Task 9: print precision, recall, F1 score and AUC. (10 pts)
sep('9.	打印精确率、召回率、F1分数和AUC值')
from sklearn.metrics import precision_score

# BUG FIX: the original printed accuracy_score under the label 精确率
# (precision). Task 9 asks for precision, which is precision_score;
# accuracy is a different metric (准确率).
print(f'精确率: {precision_score(y_test, h_test)}')
print(f'召回率: {recall_score(y_test, h_test)}')
print(f'F1分数: {f1_score(y_test, h_test)}')
# AUC is computed from the positive-class probabilities, not hard predictions.
print(f'AUC值: {roc_auc_score(y_test, proba_test)}')

# Task 10: plot the ROC curve, annotating a handful of threshold values. (10 pts)
sep('10.	画出ROC曲线')
fpr, tpr, thre = roc_curve(y_test, proba_test)
plt.figure(figsize=[8, 8])
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# Annotate roughly every fifth threshold along the curve.
# BUG FIX: `len(thre) // 5` is 0 when there are fewer than 5 thresholds,
# which made `i % group` raise ZeroDivisionError; clamp the step to >= 1.
step = max(1, len(thre) // 5)
for i in range(0, len(thre), step):
    plt.annotate(f'{thre[i]:.2f}', xy=[fpr[i], tpr[i]])
plt.show()
