# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re
import time

import numpy as np
from ais_bench.infer.interface import InferSession
from transformers import AutoTokenizer

from utils import logger


class PunctuationModel:
    """Restore punctuation in plain text with a token-classification model.

    The model runs through an ais_bench ``InferSession`` (NPU); tokenization
    uses a HuggingFace tokenizer. Label index 0 in the punctuation vocabulary
    means "no punctuation after this token".
    """

    def __init__(self, cfg):
        """Load model, tokenizer and punctuation vocabulary, then warm up.

        Args:
            cfg: mapping providing 'device_id', 'punc_model_path',
                'punc_tokenizer' and 'punc_vocab'.
        """
        self.model = InferSession(cfg.get('device_id'), cfg.get('punc_model_path'))
        self.tokenizer = AutoTokenizer.from_pretrained(cfg.get('punc_tokenizer'))

        # One label per line; index 0 is the "no punctuation" label.
        self._punc_list = []
        with open(cfg.get('punc_vocab'), 'r', encoding='utf-8') as f:
            for line in f:
                self._punc_list.append(line.strip())

        # warmup — also validates the whole pipeline end to end
        self('生命在于运动')
        logger.debug('Punctuator warmup done!')

    def _clean_text(self, text):
        """Lowercase *text* and strip everything the model cannot consume.

        Keeps only ASCII alphanumerics and CJK unified ideographs, then
        removes any punctuation symbols from the label vocabulary so the
        model predicts punctuation from scratch.
        """
        text = text.lower()
        text = re.sub(r'[^A-Za-z0-9\u4e00-\u9fa5]', '', text)
        # Skip label 0 ("no punctuation"); re.escape guards against regex
        # metacharacters (e.g. '-', ']') appearing in the vocabulary.
        punc_chars = ''.join(self._punc_list[1:])
        if punc_chars:
            text = re.sub(f'[{re.escape(punc_chars)}]', '', text)
        return text

    def preprocess(self, text):
        """Tokenize *text* character by character.

        Returns:
            (input_ids, seg_ids, seq_len) tuple, or ``None`` when nothing
            remains after cleaning (empty / symbol-only input).
        """
        clean_text = self._clean_text(text)
        if len(clean_text) == 0:
            return None
        tokenized_input = self.tokenizer(list(clean_text), return_length=True, is_split_into_words=True)
        input_ids = tokenized_input['input_ids']
        seg_ids = tokenized_input['token_type_ids']
        seq_len = tokenized_input['length'][0]
        return input_ids, seg_ids, seq_len

    def infer(self, input_ids, seg_ids):
        """Run one sequence through the NPU session and return its raw output."""
        # Shape (1, seq_len) int64 batches of one, as the dynamic-shape
        # session expects.
        input_ids = np.asarray(input_ids, dtype=np.int64).reshape(1, -1)
        seg_ids = np.asarray(seg_ids, dtype=np.int64).reshape(1, -1)
        start = time.time()
        output = self.model.infer([input_ids, seg_ids],
                                  "dymshape",
                                  custom_sizes=1000
                                  )
        logger.debug(f'Punctuator infer time: {(time.time() - start) * 1000} ms')
        return output

    def postprocess(self, input_ids, seq_len, preds):
        """Interleave predicted punctuation symbols with the input tokens.

        The [1:seq_len-1] slices drop the special tokens added by the
        tokenizer (presumably [CLS]/[SEP] — per the tokenizer config).
        """
        tokens = self.tokenizer.convert_ids_to_tokens(input_ids[1:seq_len - 1])
        labels = preds[1:seq_len - 1].tolist()

        text = ''
        for token, label in zip(tokens, labels):
            text += token
            if label != 0:  # 0 = no punctuation after this token
                text += self._punc_list[label]
        return text

    def __call__(self, text):
        """Return *text* with punctuation restored.

        Input that is empty after cleaning is returned unchanged instead of
        crashing on the ``None`` returned by :meth:`preprocess`.
        """
        features = self.preprocess(text)
        if features is None:
            return text
        input_ids, seg_ids, seq_len = features
        preds = self.infer(input_ids=input_ids, seg_ids=seg_ids)[0]
        return self.postprocess(input_ids, seq_len, preds)
