import logging
from typing import List, Optional

from .tokenization_utils import Tokenizer

logger = logging.getLogger(__name__)


class BertTokenizer(Tokenizer):
    """BERT-style tokenizer: encodes one or two texts as ``[CLS] A [SEP] (B [SEP])``.

    Relies on the parent ``Tokenizer`` for ``tokenize`` /
    ``convert_tokens_to_ids``, the special tokens (``cls_token`` /
    ``sep_token`` / ``pad_token`` and their ``*_id`` counterparts),
    ``vocab`` and ``max_len``.
    """

    def __init__(self,
                 **kwargs):
        super(BertTokenizer, self).__init__(**kwargs)
        # Validate the special tokens: each must be set and present in the
        # vocabulary. (Deduplicated from three copy-pasted checks; the error
        # message strings are reproduced byte-for-byte.)
        for prefix in ('cls', 'sep', 'pad'):
            token = getattr(self, f'{prefix}_token')
            if token is None:
                raise ValueError(f" {prefix} token 不能 为空，请检查")
            if token not in self.vocab:
                raise ValueError(f" {prefix} token 不在词典中，请检查")

        # Keys used when packaging results in output_dict().
        self.input_ids_key = 'input_ids'
        self.type_ids_key = 'type_ids'
        self.mask_ids_key = 'mask_ids'

    def encode(self,
               text1: str,
               text2: Optional[str] = None,
               return_type_ids=False,
               return_mask_ids=False,
               return_dict=False,
               **kwargs):
        """Tokenize ``text1`` (and optionally ``text2``) and encode to ids.

        Args:
            text1: first (or only) text.
            text2: optional second text; falsy values (None / "") are ignored.
            return_type_ids: also produce segment ids (0 for text1, 1 for text2).
            return_mask_ids: also produce an attention mask of all 1s.
            return_dict: return a dict (see output_dict) instead of a
                single list / list of lists (see output_tuple).

        Returns:
            Whatever ``encode_ids`` returns for the converted id sequences.
        """
        text1_ids = self.convert_tokens_to_ids(self.tokenize(text1))
        text2_ids = self.convert_tokens_to_ids(self.tokenize(text2)) if text2 else None

        return self.encode_ids(text1_ids=text1_ids,
                               text2_ids=text2_ids,
                               return_type_ids=return_type_ids,
                               return_mask_ids=return_mask_ids,
                               return_dict=return_dict,
                               **kwargs)

    def encode_ids(self,
                   text1_ids: List[int],
                   text2_ids: Optional[List[int]] = None,
                   return_type_ids=False,
                   return_mask_ids=False,
                   return_dict=False,
                   **kwargs):
        """Build ``[CLS] text1 [SEP] (text2 [SEP])`` from already-converted ids.

        Sequences are first truncated by ``_sample_ids_cut_`` so the result
        fits within ``self.max_len``.

        Args:
            text1_ids: token ids of the first text.
            text2_ids: optional token ids of the second text.
            return_type_ids: include segment ids (0 for text1 span incl. its
                [CLS]/[SEP], 1 for text2 span incl. its trailing [SEP]).
            return_mask_ids: include an attention mask of all 1s.
            return_dict: package results via output_dict, else output_tuple.
        """
        text1_ids, text2_ids = self._sample_ids_cut_(text1_ids=text1_ids, text2_ids=text2_ids)

        input_ids = [self.cls_token_id] + text1_ids + [self.sep_token_id]
        type_ids = [0] * len(input_ids) if return_type_ids else None

        # An empty (or None) text2 contributes nothing — no second [SEP].
        if isinstance(text2_ids, list) and text2_ids:
            input_ids = input_ids + text2_ids + [self.sep_token_id]
            if return_type_ids:
                type_ids = type_ids + [1] * (len(text2_ids) + 1)

        mask_ids = [1] * len(input_ids) if return_mask_ids else None

        if return_dict:
            return self.output_dict(input_ids=input_ids, type_ids=type_ids, mask_ids=mask_ids)
        return self.output_tuple(input_ids=input_ids, type_ids=type_ids, mask_ids=mask_ids)

    def output_dict(self, input_ids=None, type_ids=None, mask_ids=None):
        """Package the id lists into a dict, dropping entries that are None."""
        output_dict = {self.input_ids_key: input_ids,
                       self.type_ids_key: type_ids,
                       self.mask_ids_key: mask_ids}
        return {k: v for k, v in output_dict.items() if v is not None}

    @classmethod
    def output_tuple(cls, input_ids=None, type_ids=None, mask_ids=None):
        """Return the non-None id lists: the bare list if only one, else a list of lists."""
        output_list = [item for item in (input_ids, type_ids, mask_ids) if item is not None]
        return output_list[0] if len(output_list) == 1 else output_list

    def _sample_ids_cut_(self, text1_ids: List[int], text2_ids: Optional[List[int]] = None):
        """Truncate the id sequences so the encoded result fits ``self.max_len``.

        The budget accounts for [CLS] plus up to two [SEP] tokens. If text1
        alone (with [CLS]/[SEP]) already reaches max_len, text1 is truncated
        and text2 is dropped entirely (returned as []); otherwise only text2
        is truncated.

        Returns:
            (text1_ids, text2_ids) — possibly truncated; text2_ids may be
            None (passed through), [] (dropped), or a truncated list.
        """
        text2_ids_len = len(text2_ids) if isinstance(text2_ids, list) else 0
        # NOTE(review): for a single text the real overhead is 2 ([CLS]+[SEP]),
        # so "+ 3" is one token conservative; at the boundary the fall-through
        # below returns text1 unchanged, so the final output is unaffected.
        if (len(text1_ids) + text2_ids_len + 3) <= self.max_len:
            return text1_ids, text2_ids
        token_len = 2 + len(text1_ids)  # text1 plus its [CLS] and [SEP]
        if token_len >= self.max_len:
            return text1_ids[: self.max_len - 2], []
        if isinstance(text2_ids, list):
            # Reserve one slot for text2's trailing [SEP].
            return text1_ids, text2_ids[: self.max_len - token_len - 1]
        return text1_ids, text2_ids

