import time
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer
from tqdm import tqdm


class MyDataset(Dataset):
    """Text-pair dataset read from a tab-separated file and tokenized with
    the ``hfl/chinese-roberta-wwm-ext`` BERT tokenizer.

    Each line of the source file is expected to carry exactly four
    tab-separated fields: dataid, classes, ori (original sentence) and
    entailment. Malformed lines are skipped.

    Args:
        config: object providing ``train_path``, ``valid_path``,
            ``Bert_path`` (tokenizer cache dir).
        mode: 1 loads the training split, 0 (default) the validation split.
    """

    # Expected per-line fields, in file order.
    FIELDS = ('dataid', 'classes', 'ori', 'entailment')

    def __init__(self, config, mode=0):
        super().__init__()
        self.config = config
        self.mode = mode
        # Loads (and on first use downloads) the pretrained tokenizer at
        # construction time; cached under config.Bert_path.
        self.tokenizer = BertTokenizer.from_pretrained(
            "hfl/chinese-roberta-wwm-ext", cache_dir=config.Bert_path)
        self.data = []
        self.load()

    def load(self):
        """Read the split selected by ``self.mode`` into ``self.data``."""
        path = self.config.train_path if self.mode else self.config.valid_path
        start = time.time()
        print(f'loading data from {path}')
        with open(path, 'r', encoding='utf-8') as f:
            for raw in tqdm(f):
                fields = raw.strip().split('\t')
                # Guard once per line instead of once per generated key,
                # as the old dict comprehension effectively did.
                if len(fields) != 4:
                    continue
                self.data.append(dict(zip(self.FIELDS, fields)))
        print(f'load finished, cost:{int(time.time() - start)} s')

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        """Tokenize every field of sample *item*; return {field: input_ids}.

        TODO(review): this tokenizes 'dataid' and 'classes' as well, though
        collate_fn only consumes 'ori'/'entailment' — confirm intended.
        """
        sample = self.data[item]
        output = {}
        for key, text in sample.items():
            # Truncate to 252 chars — presumably to keep the sequence plus
            # special tokens under a fixed model limit; TODO confirm.
            encoded = self.tokenizer.encode_plus(text[:252])
            output[key] = encoded['input_ids']
        return output


def collate_fn(batch):
    """Collate SimCSE-style samples into one padded id matrix.

    For each sample its 'ori' and 'entailment' token-id rows are stacked
    consecutively (order: a, a+, b, b+, ...), each right-padded with 0 to
    the longest sequence found anywhere in the batch.

    Args:
        batch: list of dicts mapping field name -> list of token ids;
            ``None`` entries are dropped.

    Returns:
        LongTensor of shape (2 * n_samples, max_len); empty tensor when
        the batch is empty after filtering.
    """
    batch = [sample for sample in batch if sample is not None]
    if not batch:
        # Old code crashed here with max() on an empty sequence.
        return torch.empty(0, 0, dtype=torch.long)
    # Pad width is set by the longest id list over *all* fields, matching
    # the original behavior (which also scanned dataid/classes).
    max_len = max(len(ids) for sample in batch for ids in sample.values())
    rows = []
    for sample in batch:
        for key in ('ori', 'entailment'):
            # Index explicitly: a missing field now raises a clear KeyError
            # instead of .get() returning None and failing on `None + list`.
            ids = sample[key]
            rows.append(ids + [0] * (max_len - len(ids)))
    return torch.tensor(rows)


def GetDataloader(config, num_workers=2):
    """Build the train and validation DataLoaders.

    Args:
        config: object providing ``batch_size`` plus everything
            :class:`MyDataset` needs.
        num_workers: worker processes per DataLoader (default 2).

    Returns:
        (train_loader, valid_loader) tuple.
    """
    # NOTE(review): shuffle=False on the *train* loader looks suspicious
    # for training — confirm this is intentional before changing it.
    loaders = []
    for mode in (1, 0):  # 1 = train split, 0 = validation split
        dataset = MyDataset(config, mode=mode)
        loaders.append(
            DataLoader(dataset,
                       batch_size=config.batch_size,
                       shuffle=False,
                       collate_fn=collate_fn,
                       num_workers=num_workers))
    return loaders[0], loaders[1]
