import itertools
import matplotlib.pyplot as plt
import numpy as np
from sklearn import metrics
from imblearn.ensemble import (BalancedRandomForestClassifier)
from imblearn.pipeline import make_pipeline
from lightgbm import LGBMClassifier
from sklearn import preprocessing
from sklearn.ensemble import (RandomForestClassifier)
from sklearn.linear_model import LogisticRegression
from utils.general_utils import evaluate_metrics, timer, read_trainValTest_fromCSV

def plot_confusion_matrix(cm, classes, ax,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Print and plot a confusion matrix onto the given matplotlib Axes.

    Parameters
    ----------
    cm : np.ndarray
        Square confusion matrix (e.g. from sklearn.metrics.confusion_matrix).
    classes : sequence
        Class labels used for the axis tick marks.
    ax : matplotlib.axes.Axes
        Axes to draw into.
    normalize : bool
        If True, each row is normalized to sum to 1 so cells show
        per-true-class rates instead of raw counts.
    title : str
        Axes title.
    cmap : matplotlib colormap
        Colormap for the image.
    """
    if normalize:
        # Bug fix: previously `normalize=True` only changed the cell number
        # format; the matrix itself was never normalized. Normalize each row
        # (true class) so the displayed values are rates.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    print(cm)
    print('')

    ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.set_title(title)
    tick_marks = np.arange(len(classes))
    # Bug fix: make `ax` the current axes BEFORE calling plt.xticks/yticks,
    # otherwise the x ticks were applied to whatever axes happened to be
    # current rather than to `ax`.
    plt.sca(ax)
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    # Annotate every cell; use white text on dark cells for readability.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        ax.text(j, i, format(cm[i, j], fmt),
                horizontalalignment="center",
                color="white" if cm[i, j] > thresh else "black")

    ax.set_ylabel('True label')
    ax.set_xlabel('Predicted label')

def print_metrics_results(pipeline_model, x_test, y_test, model_name, print_results=False, digits=3, whether_return_AIC=False):
    """
    Predict on x_test with a fitted pipeline and compute evaluation metrics.

    Parameters
    ----------
    pipeline_model : fitted estimator/pipeline
        Must support predict() and predict_proba().
    x_test, y_test : array-like
        Test features and ground-truth labels.
    model_name : str
        Name passed through to evaluate_metrics for reporting.
    print_results : bool
        Forwarded to evaluate_metrics.
    digits : int
        Kept for interface compatibility (not used here).
    whether_return_AIC : bool
        If True, evaluate_metrics is asked to compute AIC from n and k.

    Returns
    -------
    dict
        Metrics dictionary produced by evaluate_metrics.
    """
    predictions = pipeline_model.predict(x_test)
    positive_probs = pipeline_model.predict_proba(x_test)[:, 1]

    aic_config = {
        'return_AIC': whether_return_AIC,
        'n': len(y_test),       # number of samples
        'k': x_test.shape[1],   # number of features
    }
    return evaluate_metrics(y_test=y_test,
                            y_pred=predictions,
                            y_pred_prob=positive_probs,
                            model_name=model_name,
                            print_results=print_results,
                            return_AIC=aic_config)

def construct_pipelines(cls_weight, model_list=('wlr', 'wrf')):
    """
    Build a list of (StandardScaler -> classifier) pipelines.

    Parameters
    ----------
    cls_weight : dict or str or None
        class_weight passed to every classifier.
    model_list : iterable of str
        Which models to build; any of 'wlr' (weighted logistic regression),
        'wrf' (weighted random forest), 'bwrf' (balanced random forest),
        'wlgb' (weighted LightGBM). Pipelines are appended in that order.

    Returns
    -------
    list
        One imblearn pipeline per requested model.
    """
    # NOTE: each pipeline gets its OWN StandardScaler instance.
    # make_pipeline does not clone its steps, so sharing a single scaler
    # object across pipelines would let fitting one pipeline silently
    # refit the scaler embedded in all the others.
    pipe_list = []
    if 'wlr' in model_list:
        # Weighted logistic regression (CV variant kept for reference):
        # wlr = LogisticRegressionCV(cv=5, random_state=0, class_weight=cls_weight)
        wlr = LogisticRegression(random_state=0, class_weight=cls_weight)
        pipe_list.append(make_pipeline(preprocessing.StandardScaler(), wlr))
    if 'wrf' in model_list:
        # Weighted random forest.
        wrf = RandomForestClassifier(
            n_estimators=100, max_depth=10, criterion='entropy', random_state=0, n_jobs=-1, class_weight=cls_weight)
        pipe_list.append(make_pipeline(preprocessing.StandardScaler(), wrf))
    if 'bwrf' in model_list:
        # Bug fix: bwrf was previously constructed inside the 'wrf' branch,
        # so requesting 'bwrf' without 'wrf' raised UnboundLocalError.
        bwrf = BalancedRandomForestClassifier(random_state=0, n_jobs=-1, class_weight=cls_weight)
        pipe_list.append(make_pipeline(preprocessing.StandardScaler(), bwrf))
    if 'wlgb' in model_list:
        # Weighted LightGBM classifier.
        wlgb = LGBMClassifier(random_state=0, n_jobs=-1, class_weight=cls_weight)
        pipe_list.append(make_pipeline(preprocessing.StandardScaler(), wlgb))
    return pipe_list

@timer
def fit_one_pipeline(pipeline, X_train, y_train, model_name='--'):
    """Fit a single pipeline on the training data (wall time reported by @timer)."""
    print(model_name + ':')
    pipeline.fit(X_train, y_train)

def fit_multiple_pipelines(pipeline_list, model_name_list, X_train, y_train):
    """
    Fit several pipelines on the same training data, one after another.

    Parameters
    ----------
    pipeline_list : list
        Pipelines to fit.
    model_name_list : list of str
        One display name per pipeline (same length as pipeline_list).
    X_train, y_train : array-like
        Training features and labels shared by all pipelines.

    Raises
    ------
    ValueError
        If pipeline_list and model_name_list have different lengths.
    """
    cnt_pips = len(pipeline_list)
    cnt_mode_names = len(model_name_list)
    if cnt_pips != cnt_mode_names:
        # ValueError is more specific than bare Exception and remains
        # backward compatible with any existing `except Exception` handler.
        raise ValueError(f'pipeline_list和model_name_list参数的元素个数不匹配！{cnt_pips} !={cnt_mode_names}')
    # Fit each pipeline via fit_one_pipeline (which also times the fit).
    for pipeline, model_name in zip(pipeline_list, model_name_list):
        fit_one_pipeline(pipeline, X_train, y_train, model_name)

###############################################################################
# Plot ROC curves. As of scikit-learn 1.0 the plot_roc_curve helper is
# deprecated, so the newer RocCurveDisplay API is used instead.
###############################################################################
def plot_multiROCs_usePipelines(pipeline_list, model_name_list, X_test, y_test, show_roc=True):
    """
    Draw the ROC curve of every fitted pipeline on a single figure.

    Parameters
    ----------
    pipeline_list : list
        Fitted pipelines supporting predict_proba().
    model_name_list : list of str
        Legend name per pipeline (same length as pipeline_list).
    X_test, y_test : array-like
        Test features and binary labels.
    show_roc : bool
        If True, call plt.show() after plotting.

    Returns
    -------
    dict
        Mapping model name -> (fpr, tpr) arrays.
    """
    plt.figure(1)
    plt.plot([0, 1], [0, 1], 'k--')  # chance-level diagonal
    ax = plt.gca()
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1.01)
    cnt_pips = len(pipeline_list)
    cnt_mode_names = len(model_name_list)
    if cnt_pips != cnt_mode_names:
        raise Exception(f'pipeline_list和model_name_list参数的元素个数不匹配！{cnt_pips} !={cnt_mode_names}')
    fpr_tpr_dicts = {}
    for pipeline, current_modename in zip(pipeline_list, model_name_list):
        scores = pipeline.predict_proba(X_test)[:, 1]
        fpr, tpr, _ = metrics.roc_curve(y_test, scores)
        fpr_tpr_dicts[current_modename] = (fpr, tpr)
        roc_auc = metrics.auc(fpr, tpr)
        # `name=` rather than the deprecated `estimator_name=` (removed in
        # scikit-learn 1.9) to avoid the FutureWarning.
        display = metrics.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc, name=current_modename)
        display.plot(ax=ax)
    if show_roc:
        plt.show()
    return fpr_tpr_dicts

def get_proc_lr_rf_data(is_shuffle=True,
                        whether_scaled=True,
                        scaled_base='x_train',
                        drop_column=None):
    """
    Load train/val/test splits from CSV and merge train + val for model fitting.

    The validation split is not needed separately for these classical models,
    so it is concatenated onto the training split.

    Parameters
    ----------
    is_shuffle : bool
        Forwarded to read_trainValTest_fromCSV.
    whether_scaled : bool
        Whether the loader should scale the features.
    scaled_base : str
        Which split the scaler is fitted on (loader semantics).
    drop_column : str or None
        Column name to drop, forwarded as `drop_columname`.

    Returns
    -------
    tuple
        (x_train, y_train, geoId_train, x_test, y_test, geoId_test) where the
        train arrays already include the former validation rows.
    """
    _, train_data, val_data, test_data, _ = read_trainValTest_fromCSV(data_folder=None,
                                                                      is_shuffle=is_shuffle,
                                                                      whether_scaled=whether_scaled,
                                                                      scaled_base=scaled_base,
                                                                      drop_columname=drop_column)
    x_tr, y_tr, geo_tr = train_data[0], train_data[1], train_data[2]
    x_va, y_va, geo_va = val_data[0], val_data[1], val_data[2]
    x_test, y_test_r, geoId_test_r = test_data[0], test_data[1], test_data[2]
    # No separate validation set is needed, so fold it into training.
    x_train_con = np.concatenate([x_tr, x_va], axis=0)
    y_train_r_con = np.concatenate([y_tr, y_va], axis=0)
    geoId_train_r_con = np.concatenate([geo_tr, geo_va], axis=0)

    # Drop references to the merged pieces so their memory can be freed early.
    del x_tr, x_va, y_tr, y_va
    return x_train_con, y_train_r_con, geoId_train_r_con, x_test, y_test_r, geoId_test_r