from nlp_tools.tokenizer.base_tokenizer import ABCTokenizer
from typing import Dict,Any
import unicodedata


from transformers import AutoTokenizer,BertTokenizer
from keras.preprocessing.sequence import pad_sequences



class HuggingTokenizer(ABCTokenizer):
    """Adapter that exposes a HuggingFace ``AutoTokenizer`` through the
    project's ``ABCTokenizer`` interface.

    Besides plain batch encoding it supports multi-segment samples
    (``list[list[str]]`` inputs, see :meth:`list_encode`) and a
    :meth:`rematch` helper that maps tokens back to character offsets in
    the original text.
    """

    def __init__(self, model_path, need_attention_ids=True):
        """Load the pretrained tokenizer found at ``model_path``.

        Args:
            model_path: directory or hub identifier understood by
                ``AutoTokenizer.from_pretrained``; also used as the save
                target in :meth:`to_dict`.
            need_attention_ids: when False, :meth:`encode` strips the
                attention-mask entry from its output.
        """
        super().__init__()
        self.model_path = model_path
        self._tokenizer: BertTokenizer = AutoTokenizer.from_pretrained(model_path)
        self.need_attention_ids = need_attention_ids

    @property
    def tokenizer(self) -> BertTokenizer:
        # ``__init__`` only assigns ``self._tokenizer`` while every other
        # method reads ``self.tokenizer``; expose the underlying tokenizer
        # explicitly so this class does not depend on the base class
        # providing the attribute.
        # NOTE(review): if ``ABCTokenizer`` already defines an equivalent
        # property this override is behaviour-identical — confirm.
        return self._tokenizer

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this tokenizer's config.

        Side effect: persists the tokenizer files to ``self.model_path``
        so that the serialized config can be reloaded later.
        """
        data = super(HuggingTokenizer, self).to_dict()
        data['config']['model_path'] = self.model_path
        data['config']['need_attention_ids'] = self.need_attention_ids
        self.tokenizer.save_pretrained(self.model_path)
        return data

    def change_model_path(self, new_model_path):
        """Redirect serialization (:meth:`to_dict`) to a new save location."""
        self.model_path = new_model_path

    def tokenize(self, text, maxlen=None, add_special_tokens=True, **kwargs):
        """Split ``text`` into sub-word tokens.

        NOTE(review): ``max_length`` is forwarded to ``tokenize`` which may
        ignore it depending on the tokenizer implementation — confirm that
        truncation is actually applied here if callers rely on it.
        """
        return self.tokenizer.tokenize(
            text, add_special_tokens=add_special_tokens, max_length=maxlen)

    def convert_tokens_to_string(self, tokens, skip_special_tokens=True):
        """Join ``tokens`` back into text, removing all spaces from the
        result (the inter-token spaces inserted by the HF detokenizer are
        noise for space-free scripts such as Chinese)."""
        if skip_special_tokens:
            # Build the set once for O(1) membership tests.
            specials = set(self.tokenizer.all_special_tokens)
            tokens = [tok for tok in tokens if tok not in specials]
        return self.tokenizer.convert_tokens_to_string(tokens).replace(" ", "")

    def rematch(self, text, tokens):
        """Map each token to the character indices it covers in ``text``.

        Returns:
            A list parallel to ``tokens``; each entry is the list of
            original character positions the token spans. Special tokens
            such as ``[CLS]`` map to an empty list.

        Raises:
            ValueError: if a token cannot be located in ``text`` (e.g. the
                tokenizer lower-cased or otherwise normalized it in a way
                this method does not replicate).
        """
        # Drop characters the tokenizer discards (NUL, U+FFFD, control
        # chars) while remembering each surviving character's original index.
        normalized_text, char_mapping = '', []
        for i, ch in enumerate(text):
            ch = ''.join([
                c for c in ch
                if not (ord(c) == 0 or ord(c) == 0xfffd or self._is_control(c))
            ])
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0
        for token in tokens:
            if self._is_special(token):
                token_mapping.append([])
            else:
                token = self.stem(token)
                # str.index with a start offset avoids copying the tail of
                # ``text`` on every iteration (was ``text[offset:].index``).
                start = text.index(token, offset)
                end = start + len(token)
                token_mapping.append(char_mapping[start:end])
                offset = end

        return token_mapping

    def encode(self,
                text_or_list,
                second_text=None,
                maxlen=None,
                is_split_into_words=False,
                add_special_tokens=True,
               **kwargs):
        """Encode one sample or a batch into model-ready id arrays.

        Accepted inputs:
            * ``str`` — promoted to a one-element batch;
            * ``list[str]`` — a batch of texts (or a single pre-tokenized
              text when ``is_split_into_words`` is True);
            * ``list[list[str]]`` — a batch of multi-segment samples,
              delegated to :meth:`list_encode`.

        Returns:
            The encoded arrays as a list: input ids, token-type ids and —
            unless ``need_attention_ids`` is False — the attention mask.

        Raises:
            TypeError: for any other input structure.
        """
        if isinstance(text_or_list, str):
            text_or_list = [text_or_list]

        if isinstance(text_or_list[0], str) or is_split_into_words:
            tokener_dict = self.tokenizer(
                text_or_list,
                padding=True,
                truncation=True,
                return_tensors='np',
                max_length=maxlen,
                is_split_into_words=is_split_into_words,
                add_special_tokens=add_special_tokens,
                **kwargs)
        elif isinstance(text_or_list[0], list):
            tokener_dict = self.list_encode(text_or_list)
        else:
            raise TypeError("不正确的输入格式")

        if not self.need_attention_ids:
            # Works for both branches now that list_encode also uses the
            # standard "attention_mask" key.
            tokener_dict.pop('attention_mask', None)
        return list(tokener_dict.values())

    def list_encode(self, text_list):
        """Encode a batch of multi-segment samples (``list[list[str]]``).

        Each sample's segments are tokenized independently, concatenated,
        and assigned alternating 0/1 token-type ids per segment. For every
        segment after the first the leading token is dropped.
        NOTE(review): that drop assumes each segment's token list starts
        with a special token such as ``[CLS]``; plain ``tokenize`` output
        would lose a real token here — confirm against the callers.

        Returns the mask under the standard key ``"attention_mask"``
        (previously ``"attention_mask_ids"``, which made the
        ``need_attention_ids=False`` stripping in :meth:`encode` a no-op
        on this path).
        """
        tokened_texts = [[self.tokenizer.tokenize(sub_text) for sub_text in text]
                         for text in text_list]
        tokens_ids, segment_ids, attention_mask_ids = [], [], []

        for sub_tokens_list in tokened_texts:
            text_tokens, text_segment, text_attention_mask = [], [], []
            for index, sub_tokens in enumerate(sub_tokens_list):
                if index != 0:
                    sub_tokens = sub_tokens[1:]
                text_tokens.extend(sub_tokens)
                text_segment.extend([index % 2] * len(sub_tokens))
                text_attention_mask.extend([1] * len(sub_tokens))
            tokens_ids.append(self.tokenizer.convert_tokens_to_ids(text_tokens))
            segment_ids.append(text_segment)
            attention_mask_ids.append(text_attention_mask)

        # Right-pad every sample to the batch maximum length.
        tokens_ids = pad_sequences(tokens_ids, padding="post", truncating='post')
        segment_ids = pad_sequences(segment_ids, padding="post", truncating='post')
        attention_mask_ids = pad_sequences(attention_mask_ids, padding="post",
                                           truncating='post')

        return {"input_ids": tokens_ids,
                "token_type_ids": segment_ids,
                "attention_mask": attention_mask_ids}

    @staticmethod
    def stem(token):
        """Strip a WordPiece continuation prefix: ``'##abc' -> 'abc'``."""
        return token[2:] if token.startswith('##') else token

    @staticmethod
    def _is_space(ch):
        """True for whitespace characters (incl. Unicode space separators)."""
        return ch in (' ', '\n', '\r', '\t') or \
               unicodedata.category(ch) == 'Zs'

    @staticmethod
    def _is_punctuation(ch):
        """True for punctuation characters, both half- and full-width
        (ASCII ranges plus any Unicode 'P*' category)."""
        code = ord(ch)
        return 33 <= code <= 47 or \
               58 <= code <= 64 or \
               91 <= code <= 96 or \
               123 <= code <= 126 or \
               unicodedata.category(ch).startswith('P')

    @staticmethod
    def _cjk_punctuation():
        """Return the full-width / CJK punctuation characters."""
        return u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\xb7\uff01\uff1f\uff61\u3002'

    @staticmethod
    def _is_cjk_character(ch):
        """True for CJK ideographs (Chinese characters included).
        See https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        """
        code = ord(ch)
        return 0x4E00 <= code <= 0x9FFF or \
               0x3400 <= code <= 0x4DBF or \
               0x20000 <= code <= 0x2A6DF or \
               0x2A700 <= code <= 0x2B73F or \
               0x2B740 <= code <= 0x2B81F or \
               0x2B820 <= code <= 0x2CEAF or \
               0xF900 <= code <= 0xFAFF or \
               0x2F800 <= code <= 0x2FA1F

    @staticmethod
    def _is_control(ch):
        """True for control/format characters (Unicode categories Cc, Cf)."""
        return unicodedata.category(ch) in ('Cc', 'Cf')

    @staticmethod
    def _is_special(ch):
        """True for bracketed special tokens such as ``[CLS]`` or ``[SEP]``."""
        return bool(ch) and (ch[0] == '[') and (ch[-1] == ']')




