import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from python_ai.common.xcommon import sep
pd.set_option('display.max_rows', None, 'display.max_columns', None, 'display.max_colwidth', 1000, 'display.expand_frame_repr', False)

# Figure setup: one 12x6 figure holding a 1-row x 2-column subplot grid.
# spn is the running subplot counter, incremented before each plt.subplot call.
spr, spc = 1, 2
spn = 0
plt.figure(figsize=[12, 6])

sep()
# Raw Wisconsin breast-cancer CSV; the file ships without a header row,
# so columns come in as integer positions 0..10.
csv_path = r'../../../../large_data/ML2/breast-cancer-wisconsin.data.csv'
df = pd.read_csv(csv_path, header=None)
print(len(df))
print(df.head())
sep()
print(df.info())
sep()
print(df.describe())

sep('df[0] is id')
# Column 0 is a per-sample id — it carries no predictive signal, so show a
# sample of its value counts and drop it from the frame.
print(df[0].value_counts().head(10))
df = df.drop(columns=[0])

sep('df[6] ?=>1')
# Column 6 uses '?' as a missing-value placeholder, which forces the whole
# column to dtype object (every entry is a string). The original code
# replaced '?' with the int 1 in place, leaving a mixed str/int column that
# was never converted to numeric — downstream sklearn fits would then see
# string features. Fix: replace, then coerce the column to a numeric dtype.
# (Assigning back to df[6] also avoids the deprecated inplace-on-a-selection
# chained-assignment pattern.)
print(df[6].value_counts())
print(df.loc[20:30, 6])
df[6] = pd.to_numeric(df[6].replace('?', 1))
print(df.loc[20:30, 6])

sep('df[10] 2=>0, 4=>1')
# Recode the label column to {0, 1} for binary classification.
# (Presumably 2 = benign, 4 = malignant per the usual WBC coding — verify.)
print(df[10].value_counts())
label_map = {2: 0, 4: 1}
df[10] = df[10].map(label_map)
print(df.loc[:10, 10])

sep('balance')
# Oversample the minority class so both labels are equally represented.
# The original hard-coded n=217, which is the class-count gap in this
# particular data file — it silently breaks (wrong balance, or ValueError)
# on any other snapshot. Compute the gap and the minority label from the
# data instead, and fix random_state so the resample is reproducible.
counts = df[10].value_counts()
print(counts)
gap = int(counts.max() - counts.min())
minority = df[df[10] == counts.idxmin()]
# sample with replacement only when the gap exceeds the minority pool,
# where sampling without replacement would raise
df1 = minority.sample(n=gap, replace=gap > len(minority), random_state=666)
df = pd.concat([df, df1])
print(df[10].value_counts())

sep('split')
# Last column is the label; every earlier column is a feature.
x, y = df.iloc[:, :-1], df.iloc[:, -1]
from sklearn.model_selection import train_test_split
# 70/30 split, seeded for reproducibility
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, random_state=666)

sep('grid search')
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
# Settings held constant across the search (fixed_params is reused below
# when the final model is refit).
fixed_params = dict(solver='liblinear',
                    max_iter=1000)
# Only the inverse regularization strength C is tuned, via 5-fold CV.
grid = GridSearchCV(LogisticRegression(**fixed_params),
                    dict(C=[0.1, 0.2, 0.5, 1]),
                    cv=5)
grid.fit(x_train, y_train)
print(f'best score = {grid.best_score_}')
print(f'best params = {grid.best_params_}')

sep('model')
# Refit a fresh estimator combining the fixed settings with the best C
# found by the grid search, then score the hold-out set.
model = LogisticRegression(**fixed_params, **grid.best_params_)
model.fit(x_train, y_train)
h_test = model.predict(x_test)          # hard class predictions
proba_test = model.predict_proba(x_test)  # class probabilities, used for ROC

from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score

# Hold-out evaluation of the tuned model: confusion matrix, then the
# per-class precision/recall/F1 report.
for title, report_fn in (('confusion matrix', confusion_matrix),
                         ('classification rpt', classification_report)):
    sep(title)
    print(report_fn(y_test, h_test))

sep('roc')
# ROC curve over the positive-class probabilities, annotated with the
# decision threshold at roughly every tenth point (plus the last point).
fpr, tpr, thresholds = roc_curve(y_test, proba_test[:, 1])
n_thr = len(thresholds)
# Guard: the original used n_thr // 10 directly, which is 0 whenever fewer
# than 10 thresholds come back and makes `i % step` raise ZeroDivisionError.
step = max(1, n_thr // 10)
spn += 1
plt.subplot(spr, spc, spn)
plt.plot(fpr, tpr)
for i, th in enumerate(thresholds):
    if i % step == 0 or i == n_thr - 1:
        plt.annotate(f'{th:.2f}', xy=(fpr[i], tpr[i]))
print('roc_auc_score', roc_auc_score(y_test, proba_test[:, 1]))

sep('learning curve')
from sklearn.model_selection import learning_curve
# NOTE(review): x/y here include the oversampled duplicate rows, so CV folds
# may share duplicated samples between train and validation — the curve can
# look optimistic. Consider computing it on the pre-balanced data; verify.
fractions = np.linspace(0.1, 1.0, 5)
train_size, train_score, test_score = learning_curve(model, x, y,
                                                     train_sizes=fractions,
                                                     cv=4)
print(train_size.shape, train_score.shape, test_score.shape)
print(train_size)
print(fractions)
print(len(x))
# with cv=4, each fit trains on 3/4 of the rows, hence the 0.75 factor
print(float(len(x)) * np.array(fractions) * 0.75)

# mean +/- std of the CV scores at each training size, for plotting below
train_score_m = train_score.mean(axis=1)
test_score_m = test_score.mean(axis=1)
train_score_s = train_score.std(axis=1)
test_score_s = test_score.std(axis=1)

spn += 1
plt.subplot(spr, spc, spn)
# Solid blue = training score, dashed red = cross-validation score.
plt.plot(train_size, train_score_m, 'b-', zorder=200)
plt.plot(train_size, test_score_m, 'r--', zorder=150)
# Shade a +/- 1 std band around each curve; low alpha keeps both bands
# readable where they overlap.
for mean, std, band_color in ((train_score_m, train_score_s, 'g'),
                              (test_score_m, test_score_s, 'y')):
    plt.fill_between(train_size, mean - std, mean + std,
                     color=band_color, zorder=300, alpha=0.1)

# finally show all drawings
plt.show()
