import torch
from torch.nn.utils.rnn import pad_sequence

from .textprocess_utils import TextProcess

from ..tokenizer.tokenization_dial import DialTokenizer, Seq2SeqDialTokenizer, GPT2DialTokenizer


class DialDataProcess(TextProcess):
    """Base class turning raw dialogue samples into padded tensor batches.

    Subclasses provide a concrete tokenizer via ``get_tokenizer``; this class
    implements the shared pipeline:
    sample -> token ids -> encoded ids -> collated, padded batch tensors.
    """

    def __init__(self,
                 dataset_dir,
                 vocab_path,
                 flag="dial",
                 sample_min_sentence=2,
                 return_type_ids=True,
                 batch_first=True,
                 label_pad_id=-100):
        """
        Args:
            dataset_dir: dataset directory, forwarded to ``TextProcess``.
            vocab_path: path to the vocabulary file used by the tokenizer.
            flag: dataset flag, forwarded to ``TextProcess``.
            sample_min_sentence: minimum number of utterances a sample must
                contain; shorter samples are rejected by ``sample_to_ids``.
            return_type_ids: whether ``encode_sample_ids`` requests token-type
                ids from the tokenizer.
            batch_first: layout of the padded batch tensors.
            label_pad_id: padding value for label ids; -100 is the index
                ignored by ``torch.nn.CrossEntropyLoss`` by default.
        """
        super(DialDataProcess, self).__init__(dataset_dir=dataset_dir, flag=flag)
        self.vocab_path = vocab_path
        self.sample_min_sentence = sample_min_sentence
        self.return_type_ids = return_type_ids
        self.batch_first = batch_first
        self.label_pad_id = label_pad_id

    def get_tokenizer(self) -> DialTokenizer:
        """Return the tokenizer. Must be implemented by subclasses."""
        raise NotImplementedError(f'子类实现 tokenizer 方法')

    def sample_to_ids(self, sample):
        """Convert one sample (a list of utterance strings) into token-id lists.

        Returns ``None`` for invalid samples (not a list, or fewer than
        ``sample_min_sentence`` utterances) so callers can filter them out.
        """
        if (not isinstance(sample, list)) or (len(sample) < self.sample_min_sentence):
            return None
        tokenizer = self.get_tokenizer()
        return [tokenizer.convert_tokens_to_ids(tokenizer.tokenize(utterance)) for utterance in sample]

    def encode_sample_ids(self, sample_ids):
        """Encode one sample's ids (e.g. into input_ids, type_ids, label_ids)."""
        tokenizer = self.get_tokenizer()
        return tokenizer.encode_ids(dialogue_ids=sample_ids,
                                    return_type_ids=self.return_type_ids,
                                    return_label_ids=True,
                                    return_dict=True)

    def _pad_field(self, batch, key, padding_value):
        """Pad one id field across the batch into a single LongTensor."""
        return pad_sequence(
            [torch.tensor(instance[key], dtype=torch.long) for instance in batch],
            batch_first=self.batch_first,
            padding_value=padding_value)

    def batch_collate(self, batch):
        """Collate a batch of encoded samples into padded tensors.

        Fields are emitted in a fixed order (input, type, target-input, label),
        including only those present in the first sample. A single present
        field is returned as a bare tensor; multiple fields as a tuple.
        """
        if len(batch) < 1:
            raise ValueError(f"值有误，请检查")
        first_sample_ids = batch[0]
        tokenizer = self.get_tokenizer()

        # (field key, padding value) pairs: labels are padded with
        # label_pad_id so padded positions are ignored by the loss; all
        # other fields are padded with the tokenizer's pad token.
        fields = [
            (tokenizer.input_ids_key, tokenizer.pad_token_id),
            (tokenizer.type_ids_key, tokenizer.pad_token_id),
            (tokenizer.trg_input_ids_key, tokenizer.pad_token_id),
            (tokenizer.label_ids_key, self.label_pad_id),
        ]
        output = [self._pad_field(batch, key, pad_value)
                  for key, pad_value in fields
                  if key in first_sample_ids]

        return output[0] if len(output) == 1 else tuple(output)


class Seq2SeqDialDataProcess(DialDataProcess):
    """Dialogue data processing for seq2seq (encoder-decoder) models."""

    def __init__(self,
                 dataset_dir,
                 vocab_path,
                 flag="seq2seq",
                 max_len=256,
                 max_history=15,
                 return_type_ids=False,
                 batch_first=True,
                 do_lower_case=True,
                 label_pad_id=-100,
                 sample_min_sentence=2):
        """
        Args:
            dataset_dir: dataset directory, forwarded to the base class.
            vocab_path: path to the vocabulary file.
            flag: dataset flag, forwarded to the base class.
            max_len: maximum encoded sequence length for the tokenizer.
            max_history: maximum number of history utterances kept.
            return_type_ids: whether encoding produces token-type ids.
            batch_first: layout of the padded batch tensors.
            do_lower_case: whether the tokenizer lower-cases input text.
            label_pad_id: padding value for label ids.
            sample_min_sentence: minimum utterances per sample (previously
                hard-coded to 2; now configurable, default unchanged).
        """
        self.max_len = max_len
        self.max_history = max_history
        super(Seq2SeqDialDataProcess, self).__init__(dataset_dir=dataset_dir,
                                                     vocab_path=vocab_path,
                                                     flag=flag,
                                                     sample_min_sentence=sample_min_sentence,
                                                     return_type_ids=return_type_ids,
                                                     batch_first=batch_first,
                                                     label_pad_id=label_pad_id)

        self.tokenizer = Seq2SeqDialTokenizer(vocab_file=self.vocab_path,
                                              label_pad_id=self.label_pad_id,
                                              max_history=self.max_history,
                                              do_lower_case=do_lower_case,
                                              max_len=self.max_len)

    def get_tokenizer(self) -> DialTokenizer:
        """Return the seq2seq dialogue tokenizer built in ``__init__``."""
        return self.tokenizer


class GPT2DialDataProcess(DialDataProcess):
    """Dialogue data processing for GPT-2 style (decoder-only) models."""

    def __init__(self,
                 dataset_dir,
                 vocab_path,
                 flag="gpt2",
                 max_len=512,
                 max_history=15,
                 label_mask_history=False,
                 return_type_ids=False,
                 batch_first=True,
                 label_pad_id=-100,
                 sample_min_sentence=2):
        """
        Args:
            dataset_dir: dataset directory, forwarded to the base class.
            vocab_path: path to the vocabulary file.
            flag: dataset flag, forwarded to the base class.
            max_len: maximum encoded sequence length for the tokenizer.
            max_history: maximum number of history utterances kept.
            label_mask_history: whether history tokens are masked out of the
                labels by the tokenizer.
            return_type_ids: whether encoding produces token-type ids.
            batch_first: layout of the padded batch tensors.
            label_pad_id: padding value for label ids.
            sample_min_sentence: minimum utterances per sample (previously
                hard-coded to 2; now configurable, default unchanged).
        """
        self.max_len = max_len
        self.max_history = max_history
        self.label_mask_history = label_mask_history
        super(GPT2DialDataProcess, self).__init__(dataset_dir=dataset_dir,
                                                  vocab_path=vocab_path,
                                                  flag=flag,
                                                  sample_min_sentence=sample_min_sentence,
                                                  return_type_ids=return_type_ids,
                                                  batch_first=batch_first,
                                                  label_pad_id=label_pad_id)

        self.tokenizer = GPT2DialTokenizer(vocab_file=self.vocab_path,
                                           label_mask_history=self.label_mask_history,
                                           label_pad_id=self.label_pad_id,
                                           max_history=self.max_history,
                                           max_len=self.max_len)

    def get_tokenizer(self) -> DialTokenizer:
        """Return the GPT-2 dialogue tokenizer built in ``__init__``."""
        return self.tokenizer
