import numpy as np
import matplotlib.pyplot as plt
from python_ai.common.xcommon import sep
import datetime

# import warnings
# warnings.filterwarnings('ignore')

# init drawings
# Subplot grid layout shared by the whole script: 1 row x 2 columns.
spr = 1  # subplot rows
spc = 2  # subplot columns
spn = 0  # running subplot index; incremented before each plt.subplot call
plt.figure(figsize=(12, 6))

# load data
from sklearn.datasets import load_breast_cancer

# Breast-cancer dataset: numeric feature matrix and a binary target vector.
data = load_breast_cancer()
x, y = data.data, data.target
m = x.shape[0]  # number of samples
print(f'm = {m}')

# split
from sklearn.model_selection import train_test_split

# Hold out 30% of the samples for testing; fixed seed keeps the split reproducible.
x_train, x_test, y_train, y_test = train_test_split(
    x, y, train_size=0.7, random_state=666)

# grid search
import os
import pickle
import sys

from sklearn.linear_model import LogisticRegression

# Hyper-parameters that are kept fixed (not tuned by the grid search).
fixed_param = dict(solver='liblinear',
                   max_iter=1000)
# Bump the version string to invalidate the on-disk cache when the grid changes.
ver = 'v2.0'
# Cache file lives next to the script, keyed by script name + version.
path = sys.argv[0] + '_' + ver + '.tmp.dat'
if os.path.exists(path):
    # NOTE(review): pickle.load is unsafe on untrusted files; acceptable here
    # because the cache is produced by this very script.
    # Use a context manager so the file is closed even if unpickling raises.
    with open(path, 'rb') as f:
        best_params = pickle.load(f)
    print(f'LOADED. ver={ver}')
else:
    print('Grid searching ...')
    estimator = LogisticRegression(**fixed_param)
    params = dict(C=[0.1, 0.2, 1, 2, 5, 10],
                  penalty=['l1', 'l2'])
    from sklearn.model_selection import GridSearchCV
    grid = GridSearchCV(estimator, params, cv=5)
    grid.fit(x_train, y_train)
    print(f'best score: {grid.best_score_}')
    print(f'best params: {grid.best_params_}')
    best_params = grid.best_params_
    # Persist the winning parameters so later runs can skip the search.
    with open(path, 'wb') as f:
        pickle.dump(best_params, f)

# best model
# Refit a fresh estimator with the tuned hyper-parameters on the training split.
model = LogisticRegression(**fixed_param, **best_params)
model.fit(x_train, y_train)
print(f'Training score = {model.score(x_train, y_train)}')
print(f'Testing score = {model.score(x_test, y_test)}')
# Class probabilities alongside the hard predictions for the first few samples.
y_predict_pro = model.predict_proba(x_test)
h_test = model.predict(x_test)
print(np.c_[y_predict_pro[:5], h_test[:5]])

# metric
sep('ROC')
from sklearn.metrics import roc_curve, roc_auc_score

# Score each sample by the predicted probability of the positive class (column 1).
y_predict_1 = y_predict_pro[:, 1]
fpr, tpr, th = roc_curve(y_test, y_predict_1)
print(len(fpr), len(tpr), len(th))

spn += 1
plt.subplot(spr, spc, spn)
plt.plot(fpr, tpr)
# Label each ROC point with its decision threshold.
# (A leftover `pass` statement in this loop body was removed.)
for fpr_i, tpr_i, t in zip(fpr, tpr, th):
    plt.annotate(f'{t:.2f}', xy=[fpr_i, tpr_i])

print(roc_auc_score(y_test, y_predict_1))

# learning curve
sep('learning curve')
from sklearn.model_selection import learning_curve

# Train-set fractions at which the learning curve is evaluated.
li = list(np.linspace(0.1, 1, 6))
xnowdt01 = datetime.datetime.now()
train_size, train_score, test_score = learning_curve(model, x_train, y_train,
                                                     train_sizes=li,
                                                     cv=4)
xnowdt02 = datetime.datetime.now()
xtimediff = xnowdt02 - xnowdt01
# BUG FIX: timedelta.seconds is only the seconds *component* (0-86399, days
# excluded); total_seconds() gives the full elapsed duration. int() is needed
# because the :,d format spec rejects floats.
print(f'Time usage of learning_curve: {int(xtimediff.total_seconds()):,d} sec')
# Debug output: the actual train sizes used vs. the requested fractions.
# With cv=4 each fit trains on 3/4 of x_train, hence the 0.75 factor below.
print(train_size.shape, train_score.shape, test_score.shape)
print(train_size)
print(len(x_train))
print(li)
m_train = len(x_train)
print(m_train)
print(np.array(li) * float(m_train))
print(np.array(li) * float(m_train) * 0.75)
# Mean and spread of the CV scores at each train size (axis 1 = CV folds).
train_score_m = train_score.mean(axis=1)
test_score_m = test_score.mean(axis=1)
train_score_s = train_score.std(axis=1)
test_score_s = test_score.std(axis=1)

spn += 1
plt.subplot(spr, spc, spn)
# Mean score curves; high zorder keeps them on top of the shaded bands.
plt.plot(train_size, train_score_m, 'bo-', zorder=100)
plt.plot(train_size, test_score_m, 'ro--', zorder=200)
# Shade mean +/- one standard deviation for train (green) and CV (yellow) scores.
train_hi = train_score_m + train_score_s
train_lo = train_score_m - train_score_s
test_hi = test_score_m + test_score_s
test_lo = test_score_m - test_score_s
plt.fill_between(train_size, train_hi, train_lo, color='g')
plt.fill_between(train_size, test_hi, test_lo, color='y')

# finally show all drawings
plt.show()
