from typing import Union, List, Tuple
from mindnlp.dataset import load_dataset as load
from mindspore.dataset import transforms
import mindspore as ms
import mindspore.dataset as ds
from mindspore.dataset import GeneratorDataset
from mindspore import Tensor
import numpy as np
import random
from mindnlp.transformers import BertTokenizer

def load_dataset(name: Union[str, List, Tuple]):
    """Load a dataset via mindnlp and print a short preview of its contents.

    `name` may be a plain dataset name or a list/tuple of arguments that is
    unpacked into the loader. Returns whatever the loader produced: either a
    dict of splits or a single dataset object.
    """
    dst = load(*name) if isinstance(name, (tuple, list)) else load(name)
    if not isinstance(dst, dict):
        # Single split: just show the first record and hand it back.
        print("sentence: ", next(dst.create_dict_iterator()))
        return dst
    # Multiple splits: preview the first train/test rows and the column keys.
    print("dataset splits: ", dst.keys())
    print("type(dst): ", type(dst['train']))
    print("The first data in train dataset:")
    for row in dst['train'].create_tuple_iterator():
        print("sentence1: ", row[0])
        print("sentence2: ", row[1])
        break
    for row in dst['train'].create_dict_iterator():
        print("dataset key: ", row.keys())
        break
    print("The first data in test dataset:")
    for row in dst['test'].create_tuple_iterator():
        print("sentence1: ", row[0])
        print("sentence2: ", row[1])
        break
    return dst

def get_processed_dataset(args, name, tokenizer: BertTokenizer, batch_size: int, model = None):
    """
    Load a dataset and build a tokenized, type-cast, padded-batch pipeline.

    Args:
        args: unused here; kept for interface compatibility with callers.
        name (`str` | `list` | `tuple`): dataset name, or a sequence of loader
            arguments whose last element names the task (selects text columns).
        tokenizer (`BertTokenizer`): produces `input_ids`/`attention_mask` and
            supplies the pad/mask token ids used below.
        batch_size (`int`): batch size for `padded_batch`.
        model: optional model consumed by `logits_function` (that helper is
            defined but not wired into the pipeline below).

    Returns:
        dict with keys 'train' and 'val'; 'val' is None when the loader
        returned a single (non-dict) dataset.
    """
    # Text column(s) holding the raw input for each supported task.
    task_to_keys = {
        "cola": ("sentence", None),
        "mnli": ("premise", "hypothesis"),
        "mnli_matched": ("premise", "hypothesis"),
        "mrpc": ("sentence1", "sentence2"),
        "qnli": ("question", "sentence"),
        "qqp": ("question1", "question2"),
        "rte": ("sentence1", "sentence2"),
        "sst2": ("sentence", None),
        "stsb": ("sentence1", "sentence2"),
        "wnli": ("sentence1", "sentence2"),
        "bookcorpus": ("text", )
    }
    def tokenize_function(*inputs):
        # Tokenize one (or a pair of) text column(s), truncating to 512 tokens.
        # NOTE(review): the map below keeps only input_ids/attention_mask; a
        # BERT tokenizer commonly also emits token_type_ids — confirm the
        # column mapping matches what this tokenizer returns.
        outputs = tokenizer(*inputs, truncation=True, max_length=512)
        return outputs
    def logits_function(*inputs):
        # Run `model` on the batch and append its logits to the columns.
        ms_inputs = []
        for tmp_input in inputs:
            ms_inputs.append(Tensor.from_numpy(tmp_input))
        outputs = model(*ms_inputs)
        return inputs[0], inputs[1], inputs[2], outputs.logits.asnumpy()
    def mask(token_ids):
        # BERT-style MLM corruption of ~15% of the positions:
        # 80% of samples -> [MASK], 10% -> left unchanged, 10% -> random token.
        # NOTE(review): the 80/10/10 draw is made once per SAMPLE (not per
        # position), and the random-token branch writes the SAME random token
        # into every selected position — confirm this is intended.
        # (Renamed from `input`, which shadowed the builtin; dropped the list
        # wrapper around the index array — plain array indexing is the
        # supported NumPy form.)
        replaced_idx = np.random.choice(
            token_ids.shape[0], max(1, int(token_ids.shape[0] * 0.15)), replace=False)
        if random.random() < 0.8:
            token_ids[replaced_idx] = tokenizer.mask_token_id
        else:
            if random.random() < 0.5:
                ...  # keep the original tokens untouched
            else:
                token_ids[replaced_idx] = random.choice(list(tokenizer.get_vocab().values()))
        return token_ids
    # Labels arrive as an arbitrary numeric type; the model expects int32.
    type_cast_op = transforms.TypeCast(ms.int32)

    dst = load_dataset(name)
    # The task name is the last element when `name` is a loader-arg sequence.
    dataset_name = None
    if isinstance(name, (list, tuple)):
        dataset_name = name[-1]
    else:
        dataset_name = name
    cols = task_to_keys[dataset_name]
    cols = list(cols)

    if dataset_name in ["bookcorpus", ]:
        # Subsample bookcorpus heavily (0.05%) — the full corpus is too large.
        # NOTE(review): this assumes the bookcorpus loader returns a single
        # dataset (not a split dict) — verify against the loader.
        data_ratio = 0.0005
        num_samples = int(dst.get_dataset_size() * data_ratio)
        sampler = ds.RandomSampler(num_samples=num_samples)
        dst.use_sampler(sampler)
        print("dataset length: ", dst.get_dataset_size())

    if isinstance(dst, dict):
        # Split dict: tokenize, cast labels, and pad-batch every split.
        print("dataset length: ", dst['train'].get_dataset_size())
        for key in dst.keys():
            dst[key] = dst[key].map(operations=tokenize_function, input_columns=cols, output_columns=['input_ids', 'attention_mask'])
            dst[key] = dst[key].map(operations=[type_cast_op, ], input_columns="label", output_columns='labels')
            dst[key] = dst[key].padded_batch(batch_size, pad_info={'input_ids': (None, tokenizer.pad_token_id),
                                                                    'attention_mask': (None, 0)})
        print("train_dst: ", dst['train'])
        print(next(dst['train'].create_tuple_iterator()))
        dataset = {
            'train': dst['train'],
            'val': dst['test']
        }
        print("processed dataset col names: ", dataset['train'].get_col_names())
    else:
        # Single dataset: same pipeline, plus MLM masking for bookcorpus.
        dst = dst.map(operations=tokenize_function, input_columns=cols, output_columns=['input_ids', 'attention_mask'])
        if 'label' in dst.get_col_names():
            dst = dst.map(operations=[type_cast_op, ], input_columns="label", output_columns='labels')
        if dataset_name in ["bookcorpus", ]:
            dst = dst.map(operations=[mask, ], input_columns=["input_ids", ], output_columns=["input_ids", ])
        dst = dst.padded_batch(batch_size, pad_info={'input_ids': (None, tokenizer.pad_token_id),
                                                                    'attention_mask': (None, 0)})
        print("dataset: ", dst.get_col_names())
        print(next(dst.create_tuple_iterator()))
        dataset = {
            'train': dst,
            'val': None
        }

    return dataset


if __name__ == "__main__":
    # Smoke test. The original call passed only ("mrpc", None), which cannot
    # satisfy the (args, name, tokenizer, batch_size, ...) signature and raised
    # a TypeError; pass the arguments in their correct positions instead.
    bert_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    get_processed_dataset(None, "mrpc", bert_tokenizer, 16)