import torch
from torch.nn.utils.rnn import pad_sequence

from .textprocess_utils import TextProcess
from ..tokenizer.tokenization_bert import BertTokenizer


class ClassificationDataProcess(TextProcess):
    """Text-classification data pipeline: tokenize raw samples, encode them
    into id features, and collate batches into padded LongTensors.

    Args:
        dataset_dir: dataset directory, forwarded to ``TextProcess``.
        vocab_path: path to the BERT vocabulary file.
        flag: dataset flag forwarded to ``TextProcess`` (default ``"cls"``).
        return_type_ids: whether ``encode_sample_ids`` should emit token-type ids.
        return_mask_ids: whether ``encode_sample_ids`` should emit attention-mask ids.
        batch_first: if True, padded batch tensors are shaped (batch, seq_len).
        max_len: maximum sequence length passed to the tokenizer.
        do_lower_case: lowercase input text during tokenization.
    """

    def __init__(self,
                 dataset_dir,
                 vocab_path,
                 flag="cls",
                 return_type_ids=False,
                 return_mask_ids=False,
                 batch_first=True,
                 max_len=128,
                 do_lower_case=True):
        # Zero-argument super() — Python 3 form of the original 2.x-style call.
        super().__init__(dataset_dir=dataset_dir, flag=flag)
        self.vocab_path = vocab_path
        self.return_type_ids = return_type_ids
        self.return_mask_ids = return_mask_ids
        self.batch_first = batch_first

        self.tokenizer = BertTokenizer(vocab_file=vocab_path, max_len=max_len, do_lower_case=do_lower_case)

    def sample_to_ids(self, sample):
        """Convert one raw sample to token ids.

        The LAST element of *sample* is the label, so a valid sample has at
        least two elements: ``[text1, label]`` or ``[text1, text2, ..., label]``
        (elements between the second text and the label are ignored).

        Returns:
            ``[text1_ids, text2_ids_or_None, int_label]``, or ``None`` when the
            sample is not a list of length >= 2.
        """
        if not isinstance(sample, list) or len(sample) < 2:
            return None
        if len(sample) == 2:
            text1, label = sample
            text2 = None
        else:
            text1, text2 = sample[:2]
            label = sample[-1]
        tokenizer = self.tokenizer
        text1_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text1))
        # An empty/None second text maps to None so single-sentence encoding is used.
        text2_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text2)) if text2 else None
        return [text1_ids, text2_ids, int(label)]

    def encode_sample_ids(self, sample_ids):
        """Encode one sample's token ids into model features.

        Delegates to the tokenizer's ``encode_ids`` (returning a dict with e.g.
        input_ids and, when requested, type_ids / mask_ids) and attaches the
        integer label under ``'label_ids'``.
        """
        text1_ids, text2_ids, label = sample_ids
        output = self.tokenizer.encode_ids(text1_ids=text1_ids,
                                           text2_ids=text2_ids,
                                           return_type_ids=self.return_type_ids,
                                           return_mask_ids=self.return_mask_ids,
                                           return_dict=True)
        output['label_ids'] = label
        return output

    def batch_collate(self, batch):
        """Collate a list of encoded samples into padded batch tensors.

        Pads ``input_ids`` with the tokenizer's pad id and type/mask ids with 0;
        which keys are present is decided from the first sample in the batch.

        Raises:
            ValueError: if *batch* is empty.
        """
        if not batch:  # idiomatic emptiness check (was: len(batch) < 1)
            # NOTE: the needless f-prefix was dropped; the message itself is unchanged.
            raise ValueError("值有误，请检查")
        first_sample_ids = batch[0]

        output = {}
        tokenizer = self.tokenizer

        def _pad(key, pad_value):
            # Stack the variable-length id lists under `key` into one padded LongTensor.
            return pad_sequence(
                [torch.tensor(instance[key], dtype=torch.long) for instance in batch],
                batch_first=self.batch_first,
                padding_value=pad_value)

        if tokenizer.input_ids_key in first_sample_ids:
            output['input_ids'] = _pad(tokenizer.input_ids_key, tokenizer.pad_token_id)

        if tokenizer.type_ids_key in first_sample_ids:
            output['type_ids'] = _pad(tokenizer.type_ids_key, 0)

        if tokenizer.mask_ids_key in first_sample_ids:
            output['mask_ids'] = _pad(tokenizer.mask_ids_key, 0)

        output['label_ids'] = torch.tensor([instance['label_ids'] for instance in batch],
                                           dtype=torch.long)

        return output


