from pycox.datasets import metabric, nwtco, support, gbsg, flchain
from sklearn.preprocessing import KBinsDiscretizer, LabelEncoder, StandardScaler
import numpy as np
import pandas as pd
import pdb
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

from .utils import LabelTransform
from imblearn.over_sampling import SMOTENC
from imblearn.over_sampling import ADASYN
from xgboost import XGBClassifier as XGBC
from catboost import CatBoostClassifier as CBC
from lightgbm import LGBMClassifier as LGBMC
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import precision_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression as LR
from sklearn.metrics import roc_auc_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
def load_data(config: dict):
    '''Load a dataset named in ``config['data']`` and return the splits.

    Only the ``"seer"`` branch is visible in this chunk; the other accepted
    dataset names (metabric, nwtco, support, gbsg, flchain) are presumably
    handled further down the file.

    For ``"seer"`` this function:
      1. reads a local CSV of SEER records,
      2. label-encodes the categorical columns into one shared, disjoint
         integer vocabulary (offsets accumulate across columns),
      3. standardizes the numeric columns,
      4. splits 80/20 into train/test, rebalances the training split with
         SMOTENC, and prints baseline accuracy/AUC/recall/precision/F1 for a
         bank of off-the-shelf classifiers (side-effect diagnostics only),
      5. carves 10% of the resampled training set off as a validation set.

    Parameters
    ----------
    config : dict
        Must contain ``'data'``. For ``"seer"``, the keys
        ``'num_numerical_feature'``, ``'num_categorical_feature'``,
        ``'num_feature'`` and ``'vocab_size'`` are added in place.

    Returns
    -------
    tuple
        ``(df, df_train, df_y_train, df_test, df_y_test, df_val, df_y_val)``
        — the raw frame plus feature frames / label series per split.
    '''
    data = config['data']
    assert data in ["metabric", "nwtco", "support", "gbsg", "flchain", "seer",], "Data Not Found!"
    if data == "seer":
        # data processing, transform all continuous data to discrete
        PATH_DATA = "./data/attention_data_all.csv"
        # PATH_DATA = "./data/noNum.csv"
        df = pd.read_csv(PATH_DATA)
        df.rename(columns={'Age recode with <1 year olds': 'Age recode'}, inplace=True)
        # Categorical columns to be label-encoded into a shared vocabulary.
        cols_categorical = ["Sex", "Year of diagnosis", "Race recode (W, B, AI, API)", "Histologic Type ICD-O-3",
                            "Age recode", "Sequence number", "Marital status at diagnosis",
                            "Primary Site - labeled", "Summary stage 2000 (1998-2017)",
                            "Reason no cancer-directed surgery", "Grade (thru 2017)",
                            "Derived AJCC T, 7th ed (2010-2015)", "Derived AJCC N, 7th ed (2010-2015)",
                            "Chemotherapy recode (yes, no/unk)", "Surg Prim Site", "tumor size"]
        # Numeric columns to be z-scored with StandardScaler.
        cols_standardize = ["Regional nodes examined (1988+)", "Total number of benign/borderline tumors for patient",
                            "Total number of in situ/malignant tumors for patient", ]
        # cols_standardize = []
        # "Mets at DX-liver (2010+)" is the binary target; drop it from features.
        df_feat = df.drop(["Mets at DX-liver (2010+)"], axis=1)
        df_feat_standardize = df_feat[cols_standardize]
        # NOTE(review): scaler is fit on the full dataset before the
        # train/test split — mild leakage of test-set statistics; confirm
        # whether this is acceptable for the experiment.
        df_feat_standardize_disc = StandardScaler().fit_transform(df_feat_standardize)
        df_feat_standardize_disc = pd.DataFrame(df_feat_standardize_disc, columns=cols_standardize)

        # must be categorical feature ahead of numerical features!
        # NOTE(review): this concat aligns on index; the scaled frame has a
        # fresh RangeIndex, so rows would misalign if df's index is not a
        # default RangeIndex — verify the CSV is read without an index column.
        df_feat = pd.concat([df_feat[cols_categorical], df_feat_standardize_disc, df["Mets at DX-liver (2010+)"]],
                            axis=1)
        # df_feat = df
        # Encode each categorical column, offsetting its codes by the running
        # total so every column occupies a disjoint integer range; the final
        # vocab_size is the total number of distinct category ids.
        vocab_size = 0
        for _, feat in enumerate(cols_categorical):
            df_feat[feat] = LabelEncoder().fit_transform(df_feat[feat]).astype(float) + vocab_size
            vocab_size = df_feat[feat].max() + 1

        # Features = all columns but the last; target = last column
        # ("Mets at DX-liver (2010+)"). Fixed seed for reproducibility.
        df_train, df_test, df_y_train, df_y_test = train_test_split(df_feat.iloc[:, 0:-1], df_feat.iloc[:, -1],
                                                                    test_size=0.2, random_state=49)

        # Optionally rebalance with ADASYN (train set only, never the test set)
        # print("before ADASYN\n",pd.Series(df_y_train).value_counts())
        # ada = ADASYN(random_state=49)
        # df_train, df_y_train = ada.fit_resample(df_train, df_y_train)
        # print("after ADASYN\n", pd.Series(df_y_train).value_counts())
        print("before SMOTENC ORIGIN\n", pd.Series(df_feat.iloc[:, -1]).value_counts())
        # Rebalance the training split with SMOTENC (handles mixed
        # categorical/continuous features).
        print("before SMOTENC\n", pd.Series(df_y_train).value_counts())
        # NOTE(review): 17 indices are listed but cols_categorical has only 16
        # entries — index 16 is the first standardized column ("Regional nodes
        # examined (1988+)"), which SMOTENC will treat as categorical. Confirm
        # whether this is intentional or an off-by-one.
        snc = SMOTENC(random_state=49, categorical_features=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
        df_train, df_y_train = snc.fit_resample(df_train, df_y_train)
        print("after SMOTENC\n", pd.Series(df_y_train).value_counts())


        # Baseline classifiers trained on the resampled training split and
        # scored on the untouched test split — diagnostic printout only; none
        # of these models is returned or reused.
        rfc = RandomForestClassifier()
        xgb = XGBC()
        svc = SVC()
        dtc = DecisionTreeClassifier()
        knn = KNeighborsClassifier()
        gbc = GradientBoostingClassifier()
        cbc = CBC()
        lgbm = LGBMC()
        rfc = rfc.fit(df_train, df_y_train)
        xgb = xgb.fit(df_train, df_y_train)
        svc = svc.fit(df_train, df_y_train)
        dtc = dtc.fit(df_train, df_y_train)
        knn = knn.fit(df_train, df_y_train)
        gbc = gbc.fit(df_train, df_y_train)
        cbc = cbc.fit(df_train, df_y_train)
        lgbm = lgbm.fit(df_train, df_y_train)
        # .score() is mean accuracy for all of these sklearn-API classifiers.
        score_r = rfc.score(df_test, df_y_test)
        score_x = xgb.score(df_test, df_y_test)
        score_s = svc.score(df_test, df_y_test)
        score_d = dtc.score(df_test, df_y_test)
        score_k = knn.score(df_test, df_y_test)
        score_gbc = gbc.score(df_test, df_y_test)
        score_cbc = cbc.score(df_test, df_y_test)
        score_lgbm = lgbm.score(df_test, df_y_test)
        print("Random Forest:{}".format(score_r))
        print("XGBoost:{}".format(score_x))
        print("SVC:{}".format(score_s))
        print("DTM:{}".format(score_d))
        print("KNN:{}".format(score_k))
        print("GBC:{}".format(score_gbc))
        print("CBC:{}".format(score_cbc))
        print("LGBM:{}".format(score_lgbm))
        print("---------------------------------------")
        # NOTE(review): AUC computed from hard predict() labels, not
        # predict_proba()/decision_function() scores — this understates AUC;
        # confirm whether label-based AUC is intended.
        print("rfc_auc_score", roc_auc_score(df_y_test, rfc.predict(df_test)))
        print("xgboost_auc_score", roc_auc_score(df_y_test, xgb.predict(df_test)))
        print("svm_auc_score", roc_auc_score(df_y_test, svc.predict(df_test)))
        print("dtc_auc_score", roc_auc_score(df_y_test, dtc.predict(df_test)))
        print("knn_auc_score", roc_auc_score(df_y_test, knn.predict(df_test)))
        print("gbc_auc_score", roc_auc_score(df_y_test, gbc.predict(df_test)))
        print("cbc_auc_score", roc_auc_score(df_y_test, cbc.predict(df_test)))
        print("lgbm_auc_score", roc_auc_score(df_y_test, lgbm.predict(df_test)))
        print("---------------------------------------")
        print("rfc_recall_score", recall_score(df_y_test, rfc.predict(df_test)))
        print("xgboost_recall_score", recall_score(df_y_test, xgb.predict(df_test)))
        print("svm_recall_score", recall_score(df_y_test, svc.predict(df_test)))
        print("dtc_recall_score", recall_score(df_y_test, dtc.predict(df_test)))
        print("knn_recall_score", recall_score(df_y_test, knn.predict(df_test)))
        print("gbc_recall_score", recall_score(df_y_test, gbc.predict(df_test)))
        print("cbc_recall_score", recall_score(df_y_test, cbc.predict(df_test)))
        print("lgbm_recall_score", recall_score(df_y_test, lgbm.predict(df_test)))
        print("---------------------------------------")
        print("rfc_precision_score", precision_score(df_y_test, rfc.predict(df_test)))
        print("xgboost_precision_score", precision_score(df_y_test, xgb.predict(df_test)))
        print("svm_precision_score", precision_score(df_y_test, svc.predict(df_test)))
        print("dtc_precision_score", precision_score(df_y_test, dtc.predict(df_test)))
        print("knn_precision_score", precision_score(df_y_test, knn.predict(df_test)))
        print("gbc_precision_score", precision_score(df_y_test, gbc.predict(df_test)))
        print("cbc_precision_score", precision_score(df_y_test, cbc.predict(df_test)))
        print("lgbm_precision_score", precision_score(df_y_test, lgbm.predict(df_test)))
        print("---------------------------------------")
        print("rfc_f1_score", f1_score(df_y_test, rfc.predict(df_test)))
        print("xgboost_f1_score", f1_score(df_y_test, xgb.predict(df_test)))
        print("svm_f1_score", f1_score(df_y_test, svc.predict(df_test)))
        print("dtc_f1_score", f1_score(df_y_test, dtc.predict(df_test)))
        print("knn_f1_score", f1_score(df_y_test, knn.predict(df_test)))
        print("gbc_f1_score", f1_score(df_y_test, gbc.predict(df_test)))
        print("cbc_f1_score", f1_score(df_y_test, cbc.predict(df_test)))
        print("lgbm_f1_score", f1_score(df_y_test, lgbm.predict(df_test)))
        # Carve 10% of the (already resampled) training set out as a validation set
        # NOTE(review): sample() has no random_state, so the train/val split is
        # not reproducible even though every other split/resample is seeded —
        # confirm whether random_state=49 should be passed here too.
        df_val = df_train.sample(frac=0.1)
        df_y_val = df_y_train[df_val.index]
        df_train = df_train.drop(df_val.index)
        df_y_train = df_y_train.drop(df_val.index)

        # Export feature/vocabulary sizes for the downstream model config.
        config['num_numerical_feature'] = int(len(cols_standardize))
        config['num_categorical_feature'] = int(len(cols_categorical))
        config['num_feature'] = int(len(df_train.columns))
        config['vocab_size'] = int(vocab_size)
        return df, df_train, df_y_train, df_test, df_y_test, df_val, df_y_val