import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from python_ai.common.xcommon import sep

# Global pandas display configuration: show everything, never fold wide frames.
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_colwidth', 1000)
pd.set_option('display.expand_frame_repr', False)
# Use a font with CJK glyphs and keep the minus sign renderable under it.
plt.rcParams['font.sans-serif'] = ['Simhei']
plt.rcParams['axes.unicode_minus'] = False

# One figure holding a 1x2 subplot grid; spn tracks the next subplot slot.
plt.figure(figsize=[16, 8])
spr = 1
spc = 2
spn = 0

# Task (from the assignment): read the "Mass" dataset with columns
# BI-RADS assessment, Age, Shape (mass shape), Margin, Density,
# Severity (1 = malignant, 0 = benign).
# NOTE(review): sklearn's breast-cancer dataset is used here as a stand-in
# for that CSV — confirm the substitution is intended.
# 1. Load the dataset.
from sklearn.datasets import load_breast_cancer

features, target = load_breast_cancer(return_X_y=True)
df = pd.DataFrame(features)
lbl = pd.Series(target)

# 2. Drop columns containing missing values (this dataset has none, so the
#    frame is unchanged, but the required cleaning step is still performed).
# BUG FIX: DataFrame.dropna returns a NEW frame — the original call discarded
# the result, making the step a no-op.
df = df.dropna(axis=1)

# Standardize every feature to zero mean / unit variance.
# NOTE(review): the scaler is fit on the FULL dataset before the train/test
# split below, which leaks test-set statistics into training — consider
# fitting the scaler on the training split only.
from sklearn.preprocessing import StandardScaler

# fit_transform returns a plain ndarray, so `df` stops being a DataFrame here.
df = StandardScaler().fit_transform(df)

# 3. Split into X (df) and y (lbl).
# 4. Hold-out split, 70% train / 30% test, with a fixed seed for repeatability.
from sklearn.model_selection import train_test_split

split = train_test_split(df, lbl, train_size=0.7, random_state=666)
df_train, df_test, lbl_train, lbl_test = split

# 5. Grid search with cross-validation.
#    a) candidate strengths 1, 0.5, 0.2, 0.1 — the assignment calls these
#       "learning rates", but LogisticRegression has no learning-rate
#       parameter; C (inverse regularization strength) is tuned instead
#    b) logistic-regression model
#    c) 6-fold cross-validation
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression

# liblinear supports both l1 and l2 penalties, which the grid below requires.
fixed_params = dict(solver='liblinear',
                    max_iter=1000)
estimator = LogisticRegression(**fixed_params)
params = dict(penalty=['l1', 'l2'],
              C=[1, 0.5, 0.2, 0.1])
# BUG FIX: the requirement asks for cross-validation 6 times ("交叉验证6次");
# the original used cv=5.
grid = GridSearchCV(estimator, params,
                    cv=6)

# d) Fit the grid search on the training split.
grid.fit(df_train, lbl_train)
best_score = grid.best_score_
print(f'Best score = {best_score}')

# e) Report the best hyperparameter combination found.
best_params = grid.best_params_
print(f'Best params = {best_params}')

# 6. Rebuild a model from the best hyperparameters, fit it on the training
#    split, then produce hard predictions and class probabilities for test.
model = LogisticRegression(**fixed_params, **grid.best_params_)
model.fit(df_train, lbl_train)
h_test = model.predict(df_test)
proba_test = model.predict_proba(df_test)

# 7. Print accuracy, recall and precision on the test split
#    (labels kept in Chinese to match the assignment's expected output).
from sklearn.metrics import accuracy_score, recall_score, precision_score

for metric_name, metric_fn in (('准确率', accuracy_score),
                               ('召回率', recall_score),
                               ('精确率', precision_score)):
    print(f'{metric_name} = {metric_fn(lbl_test, h_test)}')

# 8. Print the per-class classification report and the confusion matrix.
from sklearn.metrics import classification_report, confusion_matrix

report = classification_report(lbl_test, h_test)
cm = confusion_matrix(lbl_test, h_test)
print(report)
print(cm)

# 9. Print the AUC value and draw the ROC curve.
from sklearn.metrics import roc_curve, roc_auc_score

# BUG FIX: the requirement asks to print the AUC, but the original never did
# (roc_auc_score was imported and left unused).
print(f'AUC = {roc_auc_score(lbl_test, proba_test[:, 1])}')

fpr, tpr, thred = roc_curve(lbl_test, proba_test[:, 1])
spn += 1
plt.subplot(spr, spc, spn)
plt.plot(fpr, tpr)
# Annotate roughly 10 evenly spaced thresholds, always including the last one.
xlen = len(thred)
group = max(1, xlen // 10)
for i, th in enumerate(thred):
    if i % group == 0 or i == xlen - 1:
        plt.annotate(f'{th:.2f}', xy=[fpr[i], tpr[i]])

# Learning curve: mean +/- std of train and CV scores over 6 training-set
# sizes from 10% to 100% of the data.
from sklearn.model_selection import learning_curve

fractions = np.linspace(0.1, 1, 6)
sizes, train_scores, test_scores = learning_curve(
    model, df, lbl, train_sizes=fractions, cv=5)
spn += 1
plt.subplot(spr, spc, spn)
cmap = plt.get_cmap('rainbow', 2)
train_mean = train_scores.mean(axis=1)
test_mean = test_scores.mean(axis=1)
train_std = train_scores.std(axis=1)
test_std = test_scores.std(axis=1)
train_color = cmap(0)
test_color = cmap(1)
plt.plot(sizes, train_mean, c=train_color, linestyle='-', marker='o')
plt.plot(sizes, test_mean, c=test_color, linestyle='-', marker='o')
# Shaded bands show +/- one standard deviation around each mean curve.
plt.fill_between(sizes, train_mean - train_std, train_mean + train_std,
                 alpha=0.2, color=train_color)
plt.fill_between(sizes, test_mean - test_std, test_mean + test_std,
                 alpha=0.2, color=test_color)

# Render both subplots.
plt.show()
