# -*- coding:utf8 -*-
# @Time : 2023/3/30 16:06
# @Author : WanJie Wu
import json
import time
import os.path
import functools
import numpy as np
from tqdm import tqdm
from loguru import logger
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler


def execute_time(param):
    """Timing decorator, usable both bare and parameterized.

    Two supported forms:
      - ``@execute_time`` — *param* is the decorated callable itself; logs
        use the function's ``__name__``.
      - ``@execute_time("label")`` — *param* is a string label; returns a
        decorator that logs using that label instead.

    Both forms log start time and elapsed seconds around each call and
    return the wrapped function's result unchanged.
    """
    if callable(param):
        # Bare usage: param IS the function to wrap. functools.wraps keeps
        # __name__/__doc__ intact (consistent with the parameterized branch
        # below), so stacked decorators and the log message see the real name.
        @functools.wraps(param)
        def wrapper(*args, **kwargs):
            start = time.time()
            logger.info(f"函数【{param.__name__}】开始执行, 时间为: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))}")
            result = param(*args, **kwargs)
            logger.info(f"函数【{param.__name__}】执行结束, 耗时: {round(time.time() - start, 3)}秒")
            return result
        return wrapper

    # Parameterized usage: param is a label; build and return the real decorator.
    def decorator(func):
        @functools.wraps(func)
        def wrapper1(*args, **kwargs):
            start = time.time()
            logger.info(f"【{param}】开始执行, 时间为: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))}")
            result = func(*args, **kwargs)
            logger.info(f"【{param}】执行结束，耗时: {round(time.time() - start, 3)}秒")
            return result
        return wrapper1
    return decorator


class ReadClfDataSet(object):
    """Read a text-classification dataset from *data_dir* and turn each text
    into a (sentence-vector, label-id) pair.

    Expected directory layout:
      - ``label.txt``: one class name per line
      - ``train.txt`` / ``dev.txt``: one JSON object per line with keys
        ``"text"`` and ``"class_name"``
      - ``test.txt``: one raw text per line (no labels)

    Args:
        data_dir: directory containing the files above.
        seg_model: segmenter exposing ``ss_segment(text) -> tokens``.
        vector_model: embedder exposing ``sentence_vec([tokens]) -> vectors``.
    """

    def __init__(self, data_dir, seg_model, vector_model):
        self.data_dir = data_dir
        self.seg_model = seg_model
        self.vector_model = vector_model
        # {"label2id": {name: idx}, "id2label": {idx: name}}
        self.label = self._read_label()

    def _read_label(self):
        """Load class names from label.txt and build label<->id mappings."""
        clf = list()
        with open(os.path.join(self.data_dir, "label.txt"), "r", encoding="utf8") as f:
            for line in f:  # iterate lazily; readlines() would buffer the whole file
                line = line.strip()
                if not line:
                    continue
                clf.append(line)
        logger.info(f"训练类别为: {clf}")
        return {
            "label2id": {key: i for i, key in enumerate(clf)},
            "id2label": {i: key for i, key in enumerate(clf)},
        }

    def _vectorize(self, text):
        """Segment *text* and return its sentence vector (first/only sentence)."""
        words = self.seg_model.ss_segment(text)
        return self.vector_model.sentence_vec([words])[0]

    def _read_labeled_examples(self, filename):
        """Read a JSON-lines file with "text"/"class_name" into (X, y) arrays.

        Shared by the train and dev readers — the two original copies had
        drifted (train stored ``feature.tolist()``, dev the raw array); the
        resulting ``np.array`` is identical either way.
        """
        features = []
        label_ids = []
        with open(os.path.join(self.data_dir, filename), mode="r", encoding="utf8") as f:
            for line in tqdm(f.readlines()):
                record = json.loads(line.strip())
                features.append(self._vectorize(record["text"]).tolist())
                label_ids.append(self.label["label2id"][record["class_name"]])
        return np.array(features), np.array(label_ids)

    def read_train_examples(self):
        """Return (X, y) numpy arrays for train.txt."""
        return self._read_labeled_examples("train.txt")

    def read_dev_examples(self):
        """Return (X, y) numpy arrays for dev.txt."""
        return self._read_labeled_examples("dev.txt")

    def read_test_examples(self):
        """Return the feature matrix for test.txt (raw text lines, no labels)."""
        features = []
        with open(os.path.join(self.data_dir, "test.txt"), mode="r", encoding="utf8") as f:
            for line in tqdm(f.readlines()):
                features.append(self._vectorize(line.strip()))
        return np.array(features)

    @property
    def train_data(self):
        """Training (X, y), oversampled with SMOTE for class balance.

        NOTE(review): ``sampling_strategy=0.5`` (minority resampled to half
        the majority count) is only valid for binary labels — confirm the
        label set is binary before reusing this for multi-class data.
        """
        train_x, train_y = self.read_train_examples()
        smote = SMOTE(sampling_strategy=0.5, random_state=42)
        return smote.fit_resample(train_x, train_y)

    @property
    def dev_data(self):
        """Dev (X, y), returned as-is (no resampling)."""
        return self.read_dev_examples()

    @property
    def test_data(self):
        """Test feature matrix."""
        return self.read_test_examples()


if __name__ == "__main__":
    # Smoke test: wire the reader to local segmentation/embedding artifacts
    # and print the shape of the resampled training-feature matrix.
    from app.src.embedding import nltk
    from app.src.embedding import predict

    output_dir = "/data/output"
    dataset = ReadClfDataSet(
        data_dir=output_dir,
        seg_model=nltk.SegmentJB(),
        vector_model=predict.Word2VecPred("/data/output/word2vec.bin"),
    )
    print(dataset.train_data[0].shape)