import json
import torch
from torch.utils.data import DataLoader, Dataset


class MyDataset(Dataset):
    """Dataset that pools the train and valid JSON-lines files, re-splits
    them by ``train_valid_split``, and serves tokenized examples for one
    split selected by ``mode``.

    Each JSON line is expected to be an object with a ``'text'`` field
    (token sequence / string) and a ``'label'`` field (list of label
    strings) — assumed from usage below, TODO confirm against the data.
    """

    def __init__(self, config, mode, train_valid_split=0.8):
        """
        Args:
            config: project config; must provide ``tokenizer``, ``maxlength``,
                ``label_to_ids``, ``train_data_path`` and ``valid_data_path``.
            mode: which split this instance serves, 'train' or 'valid'.
            train_valid_split: fraction of the pooled data used for training.
        """
        super(MyDataset, self).__init__()
        assert mode in ['train', 'valid']
        self.mode = mode
        self.config = config
        self.data_list = self.process(self.config)
        # Re-split the pooled data at a single cut point (no shuffling here,
        # so the split is deterministic for a given pair of input files).
        self.split_point = int(len(self.data_list) * train_valid_split)
        self.train_data_list = self.data_list[0:self.split_point]
        self.valid_data_list = self.data_list[self.split_point:]
        self.num_chars = self.get_num_chars()

    def __getitem__(self, idx):
        """Tokenize one example of the active split and return model inputs
        plus the label-id tensor."""
        if self.mode == 'train':
            tokens, label_ids = self.train_data_list[idx]['text'], self.get_label_ids(
                self.train_data_list[idx]['label'])
        elif self.mode == 'valid':
            tokens, label_ids = self.valid_data_list[idx]['text'], self.get_label_ids(
                self.valid_data_list[idx]['label'])
        # truncation=True replaces the deprecated truncation_strategy kwarg;
        # its default strategy is 'longest_first', matching the old behavior.
        encoded = self.config.tokenizer.encode_plus(tokens, max_length=self.config.maxlength, return_tensors='pt',
                                                    add_special_tokens=False, truncation=True)
        # squeeze(0) drops the batch dim added by return_tensors='pt'.
        return {'input_ids': encoded['input_ids'].squeeze(0), 'token_type_ids': encoded['token_type_ids'].squeeze(0),
                'attention_mask': encoded['attention_mask'].squeeze(0), 'label_ids': torch.tensor(label_ids)}

    def get_label_ids(self, labels: list) -> list:
        """Map a list of label strings to their integer ids via the config."""
        return [self.config.label_to_ids[label] for label in labels]

    def process(self, config):
        """Read both JSON-lines files and return the pooled record list.

        The original train/valid split was considered poor, so both files
        are merged here and re-split in __init__ (per the author's note).
        """
        data_list = []
        with open(config.train_data_path, mode='r', encoding='utf-8') as f:
            for line in f:
                data_list.append(json.loads(line))

        with open(config.valid_data_path, mode='r', encoding='utf-8') as f:
            for line in f:
                data_list.append(json.loads(line))

        return data_list

    def get_num_chars(self) -> int:
        """Total length of the 'text' fields in the active split.

        Bug fix: the valid branch previously summed len(data['label'])
        instead of len(data['text']); both branches now count text length.
        """
        num_chars = 0
        if self.mode == 'train':
            for data in self.train_data_list:
                num_chars += len(data['text'])
        elif self.mode == 'valid':
            for data in self.valid_data_list:
                num_chars += len(data['text'])
        return num_chars

    def __len__(self):
        """Number of examples in the active split."""
        if self.mode == 'train':
            return len(self.train_data_list)
        elif self.mode == 'valid':
            return len(self.valid_data_list)


def get_dataloader(config, mode, num_workers=0):
    """Build a shuffled DataLoader over the requested split.

    Args:
        config: project config (provides batch_size and dataset paths).
        mode: 'train' or 'valid', forwarded to MyDataset.
        num_workers: worker-process count for the loader.

    Returns:
        (DataLoader, int): the loader and the number of samples in the split.
    """
    split_dataset = MyDataset(config, mode)
    loader = DataLoader(
        split_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
    )
    return loader, len(split_dataset)


class DataPrefetcher(object):
    """Stages the next batch on the GPU ahead of time, using a dedicated
    CUDA stream so the host-to-device copy can overlap with compute on the
    default stream.

    NOTE(review): requires a CUDA device at construction time
    (torch.cuda.Stream); batches are assumed to be dicts whose tensor
    values should move to the GPU — matches MyDataset.__getitem__'s output.
    """
    def __init__(self, loader):
        # Take ownership of an iterator over the loader and eagerly stage
        # the first batch so next() always has data ready.
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream()
        self.preload()

    def preload(self):
        """Fetch the next batch and kick off its async copy to the GPU."""
        try:
            self.next_data = next(self.loader)
        except StopIteration:
            # Loader exhausted: mark it so next() returns None.
            self.next_data = None
            return
        with torch.cuda.stream(self.stream):
            for k, v in self.next_data.items():
                if isinstance(v, torch.Tensor):
                    # non_blocking=True makes the copy asynchronous on
                    # self.stream (effective when the DataLoader uses
                    # pin_memory=True, as get_dataloader does).
                    self.next_data[k] = self.next_data[k].cuda(non_blocking=True)

    def next(self):
        """Return the staged batch (None once exhausted) and stage the next one."""
        # Block the current (compute) stream until the staged copies on the
        # prefetch stream have completed, so the tensors are safe to read.
        torch.cuda.current_stream().wait_stream(self.stream)
        data = self.next_data
        self.preload()
        return data
