import time
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer
from tqdm import tqdm


class MyDataset(Dataset):
    """Dataset of (original, entailment) sentence pairs for prompt-template
    sentence-embedding training (PromptBERT-style).

    Each raw line is expected to be a 4-column TSV:
    ``dataid \t classes \t ori \t entailment`` — only the last two columns
    are kept; malformed lines are skipped.
    """

    # Deletion table for dealtitle(): one C-level pass removes the same
    # characters the original removed with five chained .replace() calls.
    _WS_TABLE = str.maketrans('', '', ' \t\r\n')

    def __init__(self, config, mode=0):
        """
        Args:
            config: project config exposing model_path, train_path, valid_path.
            mode: 1 -> load the training split, 0 -> the validation split.
        """
        super(MyDataset, self).__init__()
        self.config = config
        self.mode = mode
        self.tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", cache_dir=config.model_path)
        # NOTE(review): the original kept commented-out code adding an '[X]'
        # special token; re-enable here if the template ever needs it.
        self.data = []
        self.load()

    def load(self):
        """Read the TSV split selected by self.mode into self.data."""
        path = self.config.train_path if self.mode else self.config.valid_path
        start = time.time()
        print(f'loading data from {path}')
        with open(path, 'r', encoding='utf-8') as f:
            for raw in tqdm(f):
                cols = raw.strip().split('\t')
                # Check the column count once per line instead of once per
                # key inside a comprehension (as the original did).
                if len(cols) != 4:
                    continue
                self.data.append({'ori': self.dealtitle(cols[2]),
                                  'entailment': self.dealtitle(cols[3])})
        print(f'load finished, cost:{int(time.time() - start)} s')

    def dealtitle(self, title):
        """Remove all spaces, tabs, CRs and LFs from *title*."""
        # The original's .strip().lstrip() and '\r\n' replace were redundant
        # once every ' ', '\t', '\r', '\n' character is deleted anyway.
        return title.translate(self._WS_TABLE)

    def get_template(self, title):
        """Wrap *title* in the two prompt templates.

        Returns:
            Two dicts, one per template; each has the filled template ('ori')
            and a copy where the title is replaced by an equal-length run of
            '[PAD]' tokens ('tmp') so both tokenize to the same length.
        """
        temp1 = '{}，它的意思是[MASK]。'
        temp2 = '{}，这句话表达的是[MASK]。'
        pad = '[PAD]' * len(title)
        return ({'ori': temp1.format(title), 'tmp': temp1.format(pad)},
                {'ori': temp2.format(title), 'tmp': temp2.format(pad)})

    def _encode_pair(self, tmpl):
        """Tokenize a template dict into [title_ids, pad_template_ids]."""
        return [self.tokenizer.encode_plus(tmpl['ori'])['input_ids'],
                self.tokenizer.encode_plus(tmpl['tmp'])['input_ids']]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        """Return the 4 encoded (title, pad-template) input-id pairs."""
        line = self.data[item]
        # Truncate to 252 characters so the filled template stays inside
        # BERT's 512-token budget (Chinese ~1 token/char) — TODO confirm.
        ori_t1, ori_t2 = self.get_template(line['ori'][:252])
        sim_t1, sim_t2 = self.get_template(line['entailment'][:252])
        return {'ori_temp1': self._encode_pair(ori_t1),
                'ori_temp2': self._encode_pair(ori_t2),
                'sim_temp1': self._encode_pair(sim_t1),
                'sim_temp2': self._encode_pair(sim_t2)}


def collate_fn(batch):
    """Pad every token-id sequence in the batch to one length and stack.

    Args:
        batch: samples from MyDataset.__getitem__ (None entries are dropped);
            each sample maps a template key to a
            [title_ids, pad_template_ids] pair of int lists.

    Returns:
        dict of LongTensors of shape (batch, max_len), one per field;
        an empty dict when every sample was None.
    """
    batch = [sample for sample in batch if sample is not None]
    if not batch:  # all samples filtered out — original crashed on max([])
        return {}
    # BUG FIX: the original took the max length over the [PAD]-template
    # copies (v[1]) only; when a title sequence (v[0]) tokenized longer,
    # its padding count went negative and torch.tensor() failed on the
    # resulting ragged batch. Scan both members of every pair instead.
    max_sen = max(len(seq)
                  for sample in batch
                  for pair in sample.values()
                  for seq in pair)
    # (input key, padded-ids output key, padded-template output key)
    fields = (('ori_temp1', 'ori', 'ori_tmp'),
              ('ori_temp2', 'ori1', 'ori1_tmp'),
              ('sim_temp1', 'sim', 'sim_tmp'),
              ('sim_temp2', 'sim1', 'sim1_tmp'))
    batch_data = {out: [] for _, a, b in fields for out in (a, b)}
    for sample in batch:
        for src, ids_key, tmp_key in fields:
            ids, tmp = sample[src]
            batch_data[ids_key].append(ids + [0] * (max_sen - len(ids)))
            batch_data[tmp_key].append(tmp + [0] * (max_sen - len(tmp)))
    return {k: torch.tensor(v) for k, v in batch_data.items()}


def GetDataloader(config, num_workers=2):
    """Build the (train, valid) DataLoader pair.

    Both loaders shuffle and share batch_size and the custom collate_fn;
    mode=1 selects the training split, mode=0 the validation split.
    """
    loaders = []
    for split_mode in (1, 0):  # train first, then valid
        dataset = MyDataset(config, mode=split_mode)
        loaders.append(DataLoader(dataset,
                                  batch_size=config.batch_size,
                                  shuffle=True,
                                  collate_fn=collate_fn,
                                  num_workers=num_workers))
    return tuple(loaders)

if __name__ == '__main__':
    # Smoke test: build both loaders and dump the training batches.
    from config import Config

    cfg = Config()
    train_loader, valid_loader = GetDataloader(cfg)
    for batch in train_loader:
        print(batch)
