import json
from paddle.io import Dataset, Subset, DataLoader


class KeyDataset(Dataset):
    """Minimal Dataset adapter around any indexable collection.

    Stores the given records and forwards ``len()`` and item access
    straight through, so a plain list of examples can be consumed by
    a ``DataLoader``.
    """

    def __init__(self, dict_data):
        # Hold a reference to the underlying records; no copy is made.
        self.data = dict_data

    def __len__(self):
        # Delegate directly to the wrapped collection.
        return len(self.data)

    def __getitem__(self, index):
        # Plain pass-through lookup; supports whatever indexing the
        # wrapped collection supports.
        item = self.data[index]
        return item


class BaseData:
    """Load train/dev/predict JSON-lines files and build (k-fold) dataloaders.

    Subclasses are expected to provide the collate callables used below
    (``train_collate``, ``dev_collate``, ``predict_collate``) and may
    override :meth:`read_file` to return extra items (e.g. an ``id2label``
    mapping) after the data itself.
    """

    def __init__(self, args, tokenizer):
        # ``read_file`` may be overridden to yield (data, id2label, ...);
        # the star-unpack keeps only the data here and stores the first
        # extra item as ``self.id2label`` when present.
        self.train_data, *id2label = self.read_file(args.train_file)
        if id2label:
            self.id2label = id2label[0]
        self.dev_data, *_ = self.read_file(args.dev_file) if args.dev_file else (None, None)
        self.predict_data, *_ = self.read_file(args.predict_file) if args.predict_file else (None, None)
        self.args = args
        self.tokenizer = tokenizer
        self.init_label()

    @staticmethod
    def read_file(file):
        """Parse a JSON-lines file into a list of decoded objects.

        Uses a context manager so the handle is closed deterministically
        (the previous version left the file open until GC).
        """
        with open(file, encoding='utf-8') as f:
            return [json.loads(line) for line in f]

    def get_dataloader(self):
        """Build train/dev dataloader pairs.

        Returns a dict ``{'train': [...], 'dev': [...]}`` of parallel
        DataLoader lists: a single pair when an explicit dev set exists,
        otherwise one pair per split (k-fold when ``args.kfold > 1``, a
        single 80/20 split when ``args.kfold == 1``).
        """
        ret = {'train': [], 'dev': []}
        base_dataset = KeyDataset(self.train_data)
        if self.dev_data is not None:
            train_datasets = [base_dataset]
            dev_datasets = [KeyDataset(self.dev_data)]
        else:
            train_datasets, dev_datasets = [], []
            if self.args.kfold > 1:
                from sklearn.model_selection import KFold
                data_split_idx = list(KFold(n_splits=self.args.kfold, shuffle=True,
                                            random_state=self.args.seed).split(range(len(self.train_data))))
            elif self.args.kfold == 1:
                from sklearn.model_selection import train_test_split
                # BUG FIX: train_test_split returns a flat [train_idx, dev_idx]
                # pair; wrap it in a one-element list so the loop below sees a
                # single (train, dev) split instead of trying to unpack the
                # whole train index list into two names.
                data_split_idx = [train_test_split(range(len(self.train_data)),
                                                   test_size=0.2,
                                                   random_state=self.args.seed)]
            else:
                # Previously this fell through and crashed later with an
                # opaque NameError on data_split_idx.
                raise ValueError(
                    'args.kfold must be >= 1 when no dev file is provided')

            for train_idx, dev_idx in data_split_idx:
                train_dataset = Subset(base_dataset, train_idx)
                dev_dataset = Subset(base_dataset, dev_idx)
                train_datasets.append(train_dataset)
                dev_datasets.append(dev_dataset)
        for train_dataset, dev_dataset in zip(train_datasets, dev_datasets):
            train_dataloader = DataLoader(train_dataset,
                                          batch_size=self.args.batch_size,
                                          collate_fn=self.train_collate,
                                          num_workers=self.args.num_works,
                                          shuffle=True)
            # Dev evaluation has no gradients, so a larger batch is safe.
            dev_dataloader = DataLoader(dev_dataset,
                                        batch_size=self.args.batch_size * 2,
                                        collate_fn=self.dev_collate)
            ret['train'].append(train_dataloader)
            ret['dev'].append(dev_dataloader)
        return ret

    def get_predict_dataloader(self):
        """Build a DataLoader over ``self.predict_data`` for inference."""
        predict_dataset = KeyDataset(self.predict_data)
        predict_dataloader = DataLoader(predict_dataset,
                                        batch_size=self.args.batch_size * 2,
                                        collate_fn=self.predict_collate)
        return predict_dataloader

    def init_label(self):
        # Hook for subclasses to set up label mappings; no-op by default.
        return

