import sys
sys.path.append("")
from model import clf_model

import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split

from matplotlib import pyplot as plt
import seaborn as sns


from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix

from imblearn.over_sampling import SMOTE
from collections import Counter
from memory_profiler import profile
import time

EPOCHES = 128
X_PATH = "data/sentence_codes_4096_dm0.npy" # sentence embeddings produced by the pre-trained model
ORIG_PATH = "data/train.csv"

def timer(func):
    """Decorator that prints the wall-clock execution time of *func*."""
    from functools import wraps  # local import keeps the file's top-level import block untouched

    @wraps(func)  # preserve func.__name__ / __doc__ on the wrapper
    def _wrap(*args, **kwargs):
        """
        Call *func*, print how long the call took, and return its result.

        :param args: positional arguments forwarded to func
        :param kwargs: keyword arguments forwarded to func
        :return: whatever func returns
        """
        start_time = time.time()
        result = func(*args, **kwargs)
        elapsed_time = time.time() - start_time
        print("The execution time of the function '%s' is %.6fs" % (
            func.__name__, elapsed_time))
        return result

    return _wrap

# Load the data
@timer
def prepare_data():
    """Load the sentence embeddings and their matching labels.

    :return: tuple ``(data, label)`` where ``data`` is the embedding matrix
        loaded from ``X_PATH`` and ``label`` is an ``(n_samples, 1)`` array
        taken from the third column of the CSV at ``ORIG_PATH``.
    """
    data = np.load(X_PATH)  # pre-computed vector encoding of each sentence
    orig = pd.read_csv(ORIG_PATH)  # read the CSV once (was read twice before)
    label_name = orig.columns[2]  # third column holds the target label
    label = orig.loc[:, label_name].to_numpy().reshape(-1, 1)
    return data, label

# Data augmentation
@timer
def argue_data(data, label, sampling_strategy=0.3, random_state=None):
    """Oversample the positive class with SMOTE.

    :param data: feature matrix, one row per sample
    :param label: ``(n_samples, 1)`` label array (positive class == 1)
    :param sampling_strategy: target minority/majority ratio passed to SMOTE
        (default 0.3, same as the previous hard-coded value)
    :param random_state: seed for SMOTE; ``None`` keeps the old
        non-deterministic behaviour
    :return: ``(orig_len, X_res, y_res, pos_index)`` — the original sample
        count, the resampled features, the resampled labels reshaped to a
        column, and the row indices of the original positives
    """
    augmentator = SMOTE(sampling_strategy=sampling_strategy,
                        random_state=random_state)
    orig_len = len(label)
    X_res, y_res = augmentator.fit_resample(data, label)
    y_res = y_res.reshape(-1, 1)  # fit_resample flattens y; restore column shape
    # Synthetic samples are appended after index orig_len, so these indices
    # point at the *original* positives only.
    pos_index = np.where(label == 1)[0]

    return orig_len, X_res, y_res, pos_index

# Train / test split
@timer
def _train_test_split(orig_len, X_res, y_res, pos_index, test_size=0.25,
                      random_state=None):
    """Split the SMOTE-augmented data into train and test parts.

    Routing (by design): the synthetic positives (rows after ``orig_len``)
    go only to the training set, every *original* positive is reserved for
    the test set, and the negatives are split between both.

    :param orig_len: number of samples before augmentation
    :param X_res: resampled features (originals first, synthetics appended)
    :param y_res: resampled labels, shape ``(n, 1)``
    :param pos_index: row indices of the original positive samples
    :param test_size: negative-class test fraction (default 0.25, as before)
    :param random_state: seed for the negative split; ``None`` keeps the old
        non-deterministic behaviour
    :return: ``(train_X, train_y, test_X_pos, test_X_neg, test_y_pos,
        test_y_neg)``
    """
    # Rows appended by SMOTE beyond orig_len are synthetic positives.
    train_X_pos, train_y_pos = X_res[orig_len:], y_res[orig_len:]
    # Keep only the original samples for everything below.
    X_res, y_res = X_res[:orig_len], y_res[:orig_len]
    # All original positives are held out for testing.
    test_X_pos, test_y_pos = X_res[pos_index], y_res[pos_index]
    X_neg = np.delete(X_res, pos_index, axis=0)
    y_neg = np.delete(y_res, pos_index, axis=0)

    train_X_neg, test_X_neg, train_y_neg, test_y_neg = train_test_split(
        X_neg, y_neg, test_size=test_size, random_state=random_state)

    train_X = np.concatenate([train_X_pos, train_X_neg], axis=0)
    train_y = np.concatenate([train_y_pos, train_y_neg], axis=0)

    return train_X, train_y, test_X_pos, test_X_neg, test_y_pos, test_y_neg

@timer
def concate_shuffle_data(train_X, train_y, test_X_pos, test_X_neg, test_y_pos, test_y_neg):
    """Assemble the final train/test sets and shuffle each one.

    Shuffles with a permutation index instead of the previous approach of
    concatenating the label column onto the feature matrix, which copied the
    whole matrix and silently cast the labels to the features' float dtype.

    :param train_X: training features
    :param train_y: training labels, shape ``(n, 1)``
    :param test_X_pos: positive test features
    :param test_X_neg: negative test features
    :param test_y_pos: positive test labels, shape ``(m, 1)``
    :param test_y_neg: negative test labels, shape ``(k, 1)``
    :return: ``(train_X, train_y, test_X, test_y)`` — labels flattened to 1-D
    """
    perm = np.random.permutation(len(train_y))
    train_X, train_y = train_X[perm], train_y[perm].ravel()

    test_X = np.concatenate([test_X_pos, test_X_neg], axis=0)
    test_y = np.concatenate([test_y_pos, test_y_neg], axis=0)
    perm = np.random.permutation(len(test_y))
    test_X, test_y = test_X[perm], test_y[perm].ravel()

    return train_X, train_y, test_X, test_y


@timer
def validation(test_X, test_y, clf):
    """Print AUC, a classification report, and plot the confusion matrix.

    :param test_X: test features
    :param test_y: true test labels
    :param clf: fitted classifier exposing ``predict_proba``
    """
    # NOTE(review): assumes predict_proba yields a score per sample that
    # rounds to a valid class label — confirm against clf_model.
    scores = clf.predict_proba(test_X)
    hard_preds = np.around(scores).astype("int64")  # threshold at 0.5
    matrix = confusion_matrix(test_y, hard_preds)

    print('AUC:\t', roc_auc_score(test_y, scores))
    print(classification_report(test_y, hard_preds))
    print(matrix)

    plt.figure()
    sns.heatmap(matrix, annot=True, cmap="crest", fmt='.20g')
    plt.show()

@profile(precision=4, stream=open("logs/{}.txt".format(time.strftime("03-%Y-%m-%d %H %M %S")), 'w'))
def main():
    """Run the full pipeline: load -> oversample -> split -> train -> evaluate.

    NOTE(review): the ``@profile`` log stream is opened at import time, is
    never explicitly closed, and requires a ``logs/`` directory to exist.
    """
    data, label = prepare_data()
    orig_len, X_res, y_res, pos_index = argue_data(data, label)

    train_X, train_y, test_X_pos, test_X_neg, test_y_pos, test_y_neg = \
        _train_test_split(orig_len, X_res, y_res, pos_index)

    train_X, train_y, test_X, test_y = concate_shuffle_data(
        train_X, train_y, test_X_pos, test_X_neg, test_y_pos, test_y_neg
    )

    print('train set length:\t', len(train_y))
    print('test set length:\t', len(test_y))

    # Drop the large intermediates before training to lower peak memory.
    del data, X_res, y_res, pos_index, test_X_pos, test_X_neg, test_y_pos, test_y_neg

    clf = clf_model(epoches=EPOCHES, verbose=1, validation=(test_X, test_y))
    clf.fit(train_X, train_y)

    validation(test_X, test_y, clf)
    
    

# Entry point: run the pipeline only when executed as a script.
if __name__ == "__main__":
    main()