import copy

from torch.utils.data import Dataset
import pickle
import re
import numpy as np
import torch
from utils.functions import mask_trunk


class T5MLM(Dataset):
    """Corpus wrapper for T5 masked-span (MLM-style) prediction.

    Item format depends on ``type``:
      - "train":       (tokenized masked sentence, tokenized target-span ids)
      - "checktarget": tokenized masked sentence only
      - "inf":         tokenized raw sentence only (stage-2 inference)
      - "dev":         (tokenized sentence, int label) — label 1 means the
                       sentence contains a euphemism, 0 means it does not
    """

    def __init__(self, conf, tokenizer, corpfile, type="train"):
        """
        :param conf: configuration object exposing ``MAXLEN``
        :param tokenizer: HuggingFace-style tokenizer callable
        :param corpfile: path to a pickled corpus, or — when
            ``type == "checktarget"`` — the already-loaded corpus itself
        :param type: one of "train", "dev", "inf", "checktarget"
            (name shadows the builtin; kept for backward compatibility)
        """
        super().__init__()

        self.tokenizer = tokenizer
        self.conf = conf
        self.type = type
        if self.type == "checktarget":
            # The caller hands over the corpus object directly.
            self.corp = corpfile
        else:
            # `with` closes the file even if unpickling fails (was a leaked handle).
            with open(corpfile, "rb") as f:
                self.corp = pickle.load(f)
        if self.type == "dev":  # evaluate model performance on the dev set
            # assumes columns 3 and 4 hold the euphemistic / literal sentence pair
            # — TODO confirm against the pickle's writer
            temp = np.array(self.corp)[:, 3:]
            self.corp = []
            for s in temp:
                for idx in [0, 1]:
                    s[idx] = s[idx].strip()
                    # Raw string: "\w" is an invalid escape in a plain literal.
                    # Matches brace-delimited euphemism spans, e.g. "{passed away}".
                    _y = re.findall(r"{['\w -]*}", s[idx])
                    for _idx in range(len(_y)):
                        # Replace each span with a numbered T5 sentinel token.
                        s[idx] = s[idx].replace(_y[_idx], f"<extra_id_{_idx}>")
                    # label 1 = euphemistic sentence, label 0 = non-euphemistic
                    self.corp.append([s[idx], [1, 0][idx]])

    def __getitem__(self, item):

        if self.type == "train":
            # Mask the euphemism (corp[item][0]) inside its sentence (corp[item][1]).
            s = self.corp[item][1].replace(self.corp[item][0], "<extra_id_0>")
            l = f"<extra_id_0>{self.corp[item][0]}<extra_id_1>"
            inputs = self.tokenizer(s, return_tensors="pt", max_length=self.conf.MAXLEN,
                                    padding="max_length", truncation=True)
            labels = self.tokenizer(l, return_tensors="pt", max_length=self.conf.MAXLEN, padding="max_length",
                                    truncation=True)["input_ids"].squeeze()

            return inputs, labels
        elif self.type == "checktarget":
            s = self.corp[item][1].replace(self.corp[item][0], "<extra_id_0>")
            inputs = self.tokenizer(s, return_tensors="pt", max_length=self.conf.MAXLEN,
                                    padding="max_length", truncation=True)
            return inputs
        elif self.type == "inf":  # stage-2 inference (stage 3 presumably works the same)
            # Unlike "checktarget": sentences longer than 256 tokens were already
            # split/handled before being handed to the dataloader.
            inputs = self.tokenizer(self.corp[item][1], return_tensors="pt", max_length=self.conf.MAXLEN,
                                    padding="max_length", truncation=True)
            return inputs
        elif self.type == "dev":  # test on the dev set
            inputs = self.tokenizer(self.corp[item][0], return_tensors="pt", max_length=self.conf.MAXLEN,
                                    padding="max_length", truncation=True)
            label = self.corp[item][1]
            return inputs, label
        # Previously an unknown type fell through and silently returned None.
        raise ValueError(f"unknown dataset type: {self.type!r}")

    def __len__(self):
        return len(self.corp)


class BERTBin(Dataset):
    """Binary euphemism-detection dataset for BERT, with an auxiliary MLM "tail" task."""

    def __init__(self, conf, tokenizer, corpfile="train", type="train", **kwargs):
        """
        :param conf: configuration object exposing ``MAXLEN``
        :param tokenizer: HuggingFace-style tokenizer (must expose ``mask_token_id``)
        :param corpfile: unused for "train"/"dev" (a fixed pickle is loaded);
            for "inf" it is the already-loaded dataset itself
        :param type: "train", "dev", "inf" or "checktarget"
            (name shadows the builtin; kept for backward compatibility)
        :param kwargs: for "checktarget", must contain "impeuph" — the implicit
            euphemism keyword whose first occurrence gets masked
        Items always yield the (tokenized) sentence first; when no label is
        available, ``False`` takes its place.
        """
        super().__init__()
        self.tokenizer = tokenizer
        self.conf = conf
        self.type = type
        if self.type == "train":
            with open("./corpus/step1train/train_bin_mlm.pkl", "rb") as f:
                self.corp = pickle.load(f)
            # First 70% of the corpus is the training split.
            self.corp = self.corp[:int(len(self.corp) * 0.7)]
        elif self.type == "dev":
            with open("./corpus/step1train/train_bin_mlm.pkl", "rb") as f:
                self.corp = pickle.load(f)
            # Remaining 30% is the dev split.
            self.corp = self.corp[int(len(self.corp) * 0.7):]
        elif self.type == "inf":  # in inference mode the dataset is passed in directly
            self.corp = corpfile
        elif self.type == "checktarget":
            with open("./corpus/target_corp.pkl", "rb") as f:
                self.corp = pickle.load(f)[kwargs["impeuph"]]
            self.kw = kwargs

    def __getitem__(self, item):
        if self.type == "train":
            inputs = self.tokenizer(self.corp[item][0], return_tensors="pt", max_length=self.conf.MAXLEN,
                                    padding="max_length", truncation=True)

            labels = self.corp[item][1]

            # -------tail: auxiliary MLM objective--------------------------------
            tailinputs = self.tokenizer(self.corp[item][2], return_tensors="pt", max_length=self.conf.MAXLEN,
                                        padding="max_length", truncation=True)
            l_t = self.tokenizer(self.corp[item][3], return_tensors="pt", max_length=self.conf.MAXLEN,
                                 padding="max_length", truncation=True)['input_ids']
            # -100 is the cross-entropy ignore index: supervise only [MASK] slots.
            taillabels = torch.where(tailinputs.input_ids == self.tokenizer.mask_token_id, l_t, -100)
            return inputs, labels, tailinputs, taillabels

        elif self.type == "dev":
            inputs = self.tokenizer(self.corp[item][0], return_tensors="pt", max_length=self.conf.MAXLEN,
                                    padding="max_length", truncation=True)

            labels = self.corp[item][1]
            return inputs, labels

        elif self.type == "inf":
            inputs = self.tokenizer(self.corp[item][0], return_tensors="pt", max_length=self.conf.MAXLEN,
                                    padding="max_length", truncation=True)
            return inputs, False

        elif self.type == "checktarget":
            # Pad with spaces so the whole-word match " keyword " also hits the
            # first and last tokens of the sentence.
            s = " " + " ".join(self.corp[item]) + " "
            s = s.replace(" " + self.kw["impeuph"] + " ", " [MASK] ", 1)
            # Bug fix: str.strip() returns a new string; the result was discarded.
            s = s.strip()
            sent = mask_trunk(s)
            inputs = self.tokenizer(sent, return_tensors="pt", max_length=self.conf.MAXLEN,
                                    padding="max_length", truncation=True)
            return inputs, False

        # Previously an unknown type fell through and silently returned None.
        raise ValueError(f"unknown dataset type: {self.type!r}")

    def __len__(self):
        return len(self.corp)


class BERTMlm(Dataset):
    """BERT masked-LM dataset for euphemism interpretation (stage 2).

    NOTE(history): earlier experiments loaded train_mlm.pkl … train_mlm6.pkl,
    which varied the sample count, pos/neg ratio and random half-masking; the
    variant is now chosen via ``trainpath``.
    """

    def __init__(self, conf, tokenizer, trainpath="./corpus/step2train/train_mlm6.pkl", type="train", impeuph=None):
        """
        :param conf: configuration object exposing ``MAXLEN``
        :param tokenizer: HuggingFace-style tokenizer (must expose ``mask_token_id``)
        :param trainpath: pickle file with training samples (type="train" only)
        :param type: "train", "dev", "inf" or "checktarget"
            (name shadows the builtin; kept for backward compatibility)
        :param impeuph: for "checktarget", the implicit euphemism keyword to mask
        """
        super().__init__()
        self.tokenizer = tokenizer
        self.conf = conf
        self.type = type
        if self.type == "train":
            # `with` closes the file even if unpickling fails (was a leaked handle).
            with open(trainpath, "rb") as f:
                self.corp = pickle.load(f)
        elif self.type == "inf":
            with open("./corpus/step2inf/infinputs.pkl", "rb") as f:
                self.corp = pickle.load(f)
        elif self.type == "dev":
            with open("./corpus/devset.pkl", "rb") as f:
                corp = pickle.load(f)
            # assumes columns 3 and 4 hold the euphemistic / literal sentence pair
            # — TODO confirm against the pickle's writer
            temp = np.array(corp)[:, 3:]
            self.corp = []
            for s in temp:
                for idx in [0, 1]:
                    s[idx] = s[idx].strip()
                    # Raw string: "\w" is an invalid escape in a plain literal.
                    # Matches brace-delimited euphemism spans, e.g. "{passed away}".
                    _y = re.findall(r"{['\w -]*}", s[idx])
                    for _idx in range(len(_y)):
                        # Unbrace every span, but mask only span number _idx,
                        # producing one sample per span occurrence.
                        _y_ = [each_y[1:-1] for each_y in _y]
                        _y_[_idx] = "[MASK]"
                        _s_idx = copy.deepcopy(s[idx])
                        for _idx_ in range(len(_y_)):
                            _s_idx = _s_idx.replace(_y[_idx_], _y_[_idx_], 1)
                        assert "[MASK]" in _s_idx
                        # Space-pad so the tokenizer sees [MASK] as its own token.
                        _s_idx = _s_idx.replace("[MASK]", " [MASK] ")
                        # label 1 = euphemistic sentence, label 0 = non-euphemistic
                        self.corp.append([_s_idx, [1, 0][idx]])

        elif self.type == "checktarget":
            self.impeuph = impeuph
            with open("./corpus/target_corp.pkl", "rb") as f:
                self.corp = pickle.load(f)[self.impeuph]

    def __getitem__(self, item):
        if self.type == "train":
            inputs = self.tokenizer(self.corp[item][0], return_tensors="pt", max_length=self.conf.MAXLEN,
                                    padding="max_length", truncation=True)
            l_ = self.tokenizer(self.corp[item][1], return_tensors="pt", max_length=self.conf.MAXLEN,
                                padding="max_length", truncation=True)['input_ids']
            # -100 is the cross-entropy ignore index: supervise only [MASK] slots.
            labels = torch.where(inputs.input_ids == self.tokenizer.mask_token_id, l_, -100)

            # -------tail: auxiliary MLM objective--------------------------------
            tailinputs = self.tokenizer(self.corp[item][2], return_tensors="pt", max_length=self.conf.MAXLEN,
                                        padding="max_length", truncation=True)
            l_t = self.tokenizer(self.corp[item][3], return_tensors="pt", max_length=self.conf.MAXLEN,
                                 padding="max_length", truncation=True)['input_ids']
            taillabels = torch.where(tailinputs.input_ids == self.tokenizer.mask_token_id, l_t, -100)
            return inputs, labels, tailinputs, taillabels

        elif self.type == "inf":
            sent = mask_trunk(self.corp[item][0])
            inputs = self.tokenizer(sent, return_tensors="pt", max_length=self.conf.MAXLEN,
                                    padding="max_length", truncation=True)
            return inputs, False
        elif self.type == "dev":
            sent = mask_trunk(self.corp[item][0])
            inputs = self.tokenizer(sent, return_tensors="pt", max_length=self.conf.MAXLEN,
                                    padding="max_length", truncation=True)
            labels = self.corp[item][1]
            return inputs, labels
        elif self.type == "checktarget":
            # Pad with spaces so the whole-word match " keyword " also hits the
            # first and last tokens of the sentence.
            s = " " + " ".join(self.corp[item]) + " "
            s = s.replace(" " + self.impeuph + " ", " [MASK] ", 1)
            # Bug fix: str.strip() returns a new string; the result was discarded.
            s = s.strip()
            sent = mask_trunk(s)
            inputs = self.tokenizer(sent, return_tensors="pt", max_length=self.conf.MAXLEN,
                                    padding="max_length", truncation=True)
            return inputs, False

        # Previously an unknown type fell through and silently returned None.
        raise ValueError(f"unknown dataset type: {self.type!r}")

    def __len__(self):
        return len(self.corp)
