#!/usr/bin/python3
# -*- coding: utf-8 -*-
# File  : dstoken.py
# Author: anyongjin
# Date  : 2020/9/7

import sys
from transformers import *
# Reference to this module's own namespace: because of the wildcard import
# above, every public transformers name (e.g. BertTokenizer) is reachable
# here, so Tokenizer.__init__ can resolve a tokenizer class from its string
# name via getattr(cur_module, name).
cur_module = sys.modules[__name__]


class Tokenizer:
    """Wrapper around a HuggingFace tokenizer with optional text substitution.

    Supports an optional "token map": a list of ``(origin, replacement)``
    string pairs applied to raw text before tokenization, with each
    replacement word registered in the underlying BasicTokenizer's
    ``never_split`` set so it survives tokenization as a single token.
    """

    def __init__(self, model_dir, tokenizer=None, token_map_list=None, max_len=None):
        """Load a pretrained tokenizer.

        :param model_dir: model directory / hub name passed to ``from_pretrained``.
        :param tokenizer: optional class name (e.g. ``'BertTokenizer'``) resolved
            from the transformers namespace; defaults to ``AutoTokenizer``.
        :param token_map_list: optional list of ``(origin, replacement)`` pairs,
            see ``_load_token_map``. Only applied when the loaded tokenizer
            exposes a ``basic_tokenizer`` (i.e. BERT-style tokenizers).
        :param max_len: if set (> 1), ``get_input_ids`` pads/truncates to this length.
        """
        tokenizer_loader = AutoTokenizer
        if tokenizer:
            # Resolve the tokenizer class by name; works because this module
            # wildcard-imports the transformers namespace.
            tokenizer_loader = getattr(cur_module, tokenizer)
        self.tokenizer = tokenizer_loader.from_pretrained(model_dir)
        self.vocab_size = self.tokenizer.vocab_size
        self.max_len = max_len
        self.token_map = None
        if token_map_list and hasattr(self.tokenizer, 'basic_tokenizer'):
            self._load_token_map(token_map_list)

    def _load_token_map(self, token_map):
        """Build the text-substitution table and protect replacements from splitting.

        Each pair is ``(origin, replacement)``. Origins may contain the escape
        ``'\\n'`` which is converted to a real newline. A blank-space origin
        (``' '``) is only allowed as the very first pair, so later replacements
        (which are padded with spaces) are not themselves rewritten.

        :raises ValueError: if a blank origin appears anywhere but first.
        """
        never_split = set()
        self.token_map = []
        for i, pair in enumerate(token_map):
            if i > 0 and pair[0] == ' ':
                # ValueError is more specific than the bare Exception used
                # before; existing `except Exception` handlers still catch it.
                raise ValueError('blank character " " must be the first!')
            origin = pair[0].replace('\\n', '\n')
            clean_word = pair[1].strip()
            never_split.add(clean_word)
            # Surround with spaces so the replacement is a standalone token.
            rpl_text = ' ' + clean_word + ' '
            self.token_map.append((origin, rpl_text))
        # Keep replacement words intact through BasicTokenizer.
        self.tokenizer.basic_tokenizer.never_split = never_split

    def save_pretrained(self, save_directory):
        """Save the wrapped tokenizer's files to `save_directory`."""
        self.tokenizer.save_pretrained(save_directory)

    def text_preprocess(self, text):
        """Apply the token-map substitutions (if configured) and return the text."""
        if self.token_map:
            for origin, replacement in self.token_map:
                text = text.replace(origin, replacement)
        return text

    def tokenize(self, text):
        """Preprocess `text` then tokenize it with the wrapped tokenizer."""
        text = self.text_preprocess(text)
        return self.tokenizer.tokenize(text)

    def get_input_ids(self, text, **kwargs):
        """Encode `text` to a 1-D numpy array of input ids.

        When ``self.max_len`` is set (> 1), output is padded/truncated to that
        length; otherwise no padding is applied. Keyword args override the
        defaults passed to ``tokenizer.encode``.
        """
        tokens = self.tokenize(text)
        # Pad/truncate only when a usable max length was configured.
        use_max_len = bool(self.max_len) and self.max_len > 1
        default_args = {
            'padding': 'max_length' if use_max_len else 'do_not_pad',
            'truncation': use_max_len,
            'return_tensors': 'np',
            'max_length': self.max_len,
        }
        default_args.update(kwargs)
        encoded = self.tokenizer.encode(tokens, **default_args)
        # encode returns a (1, seq_len) array with return_tensors='np'.
        return encoded[0]
