import sys

sys.path.append("")

import functools
import time

import numpy as np
import pandas as pd
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader

from utils.stratify import stratified_train_test_split

def timer(func):
    """Decorator that prints the wall-clock duration of every call to *func*.

    :param func: the callable to wrap.
    :return: a wrapper that forwards all arguments to *func*, prints the
        elapsed time, and returns *func*'s result unchanged.
    """

    # functools.wraps preserves func.__name__/__doc__ on the wrapper; without
    # it, stacked decorators (or this decorator's own print) would report
    # the wrapper's name instead of the wrapped function's.
    @functools.wraps(func)
    def _wrap(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        elapsed_time = time.time() - start_time
        print("The execution time of the function '%s' is %.6fs" % (
            func.__name__, elapsed_time))
        return result

    return _wrap

class data_builder():
    """Build train/test datasets from pre-encoded sentence vectors and a
    label CSV, oversampling the positive class with SMOTE.

    X_PATH    -- path to a .npy file holding one vector per sentence.
    ORIG_PATH -- path to the original CSV containing the label columns.
    """

    def __init__(self, X_PATH, ORIG_PATH) -> None:
        self.X_PATH = X_PATH
        self.ORIG_PATH = ORIG_PATH

    # Load data
    @timer
    def prepare_data(self, col=None):
        """Load sentence vectors and their labels.

        :param col: optional CSV column index selecting a single label
            column; when None, every column from index 2 onward is used.
        :return: (data, label) arrays, truncated to the first 1000 rows.
        """
        data = np.load(self.X_PATH)  # vector encoding of each sentence
        # Read the CSV exactly once (the original re-read it per access).
        orig = pd.read_csv(self.ORIG_PATH)
        if col is None:  # identity test, not `== None`
            label = orig.iloc[:, 2:].to_numpy()
        else:
            # Equivalent to the original .loc[:, columns[col]] lookup.
            label = orig.iloc[:, col].to_numpy().reshape(-1, 1)
        # NOTE(review): the 1000-row cap looks like a debugging limit --
        # confirm before training on the full dataset.
        return data[:1000], label[:1000]

    # Data augmentation
    @timer
    def argue_data(self, data, label):
        """Oversample positives with SMOTE (positive:negative ratio 0.3).

        :return: (orig_len, X_res, y_res, pos_index) where rows at index
            >= orig_len in X_res/y_res are the synthetic positives and
            pos_index marks the original positive rows.
        """
        augmentator = SMOTE(sampling_strategy=0.3)
        orig_len = len(label)
        X_res, y_res = augmentator.fit_resample(data, label)
        y_res = y_res.reshape(-1, 1)
        pos_index = np.where(label == 1)[0]

        return orig_len, X_res, y_res, pos_index

    # Train/test split
    @timer
    def _train_test_split(self, orig_len, X_res, y_res, pos_index):
        """Split the SMOTE-augmented data into train and test sets.

        Synthetic positives (rows past orig_len) all go to training, the
        original positives all go to test, and the negatives are split
        75/25. Both resulting sets are shuffled.

        :return: train_X, train_y, test_X, test_y
        """
        # Synthetic positives -> training set.
        train_X_pos, train_y_pos = X_res[orig_len:], y_res[orig_len:]
        X_res, y_res = X_res[:orig_len], y_res[:orig_len]

        # Original positives -> test set; everything else is negative.
        test_X_pos, test_y_pos = X_res[pos_index], y_res[pos_index]
        X_neg = np.delete(X_res, pos_index, axis=0)
        y_neg = np.delete(y_res, pos_index, axis=0)

        # Split the negatives between train and test.
        train_X_neg, test_X_neg, train_y_neg, test_y_neg = train_test_split(
            X_neg, y_neg, test_size=0.25)

        # Assemble the training set; shuffle X and y together by stacking
        # y as the last column before np.random.shuffle.
        train_X = np.concatenate([train_X_pos, train_X_neg], axis=0)
        train_y = np.concatenate([train_y_pos, train_y_neg], axis=0)
        temp = np.concatenate([train_X, train_y], axis=1)
        np.random.shuffle(temp)
        train_X, train_y = temp[:, :-1], temp[:, -1]

        # Assemble and shuffle the test set the same way.
        test_X = np.concatenate([test_X_pos, test_X_neg], axis=0)
        test_y = np.concatenate([test_y_pos, test_y_neg], axis=0)
        temp = np.concatenate([test_X, test_y], axis=1)
        np.random.shuffle(temp)
        test_X, test_y = temp[:, :-1], temp[:, -1]

        return train_X, train_y, test_X, test_y

    def build_one(self, col):
        """Full pipeline for one label column: load, augment, split."""
        data, label = self.prepare_data(col)
        orig_len, X_res, y_res, pos_index = self.argue_data(data, label)

        train_X, train_y, test_X, test_y = \
            self._train_test_split(orig_len, X_res, y_res, pos_index)

        return train_X, train_y, test_X, test_y

    def build_graph(self, data):
        """Wrap *data* as the node-feature dict of a graph sample."""
        return {
            "flow_x": np.array(data)
        }

    # Returns: x -- (n, 4) word vectors; y -- (6, n) label vectors
    def build_graph_data(self):
        """Stratified 80/20 split with the training half in a DataLoader.

        :return: (train_loader, test_X, test_y)
        """
        data, label = self.prepare_data()
        train_X, test_X, train_y, test_y = stratified_train_test_split(
            data, label, 0.2, epochs=500, random_state=1234)

        # y is appended as trailing columns so each batch carries both
        # features and labels.
        train_data = np.concatenate((train_X, train_y), 1)
        train_loader = DataLoader(train_data, batch_size=64, shuffle=True)

        return train_loader, test_X, test_y

if __name__ == "__main__":
    X_PATH = "data/sentence_codes_4096_dm0.npy"  # pre-trained sentence encodings
    ORIG_PATH = "data/train.csv"
    builder = data_builder(X_PATH, ORIG_PATH)
    # BUG FIX: `build_all` does not exist on data_builder (AttributeError
    # at runtime); run the existing no-argument pipeline instead.
    builder.build_graph_data()