# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time

import numpy as np
from ais_bench.infer.interface import InferSession

from constants import *
from utils import add_sos_eos, remove_duplicates_and_blank, load_vocab, logger


class RescoringDecoder:
    """Attention-rescoring decoder for CTC beam-search hypotheses.

    Wraps an offline decoder model (via ais_bench InferSession): given the
    encoder features and a beam of CTC candidates, it asks the model for the
    index of the best candidate and converts that candidate's tokens to text.
    """

    def __init__(self, cfg):
        """Load the decoder model, warm it up, and read the vocabulary.

        cfg: mapping providing 'device_id', 'decoder_model_path' and 'asr_vocab'.
        """
        self.decoder = InferSession(cfg.get('device_id'), cfg.get('decoder_model_path'))
        self.warmup()
        self.vocab = load_vocab(cfg.get('asr_vocab'))
        # sos and eos share the last vocabulary index.
        self.sos_id = self.eos_id = len(self.vocab) - 1

    def __call__(self, hyps, encoder_outs):
        """Convenience alias for infer()."""
        return self.infer(hyps, encoder_outs)

    def warmup(self):
        """Run a handful of dummy inferences so the first real call is not slow."""
        feats_len, tokens_len = 256, 50
        enc = np.random.randn(1, feats_len, OUTPUT_SIZE).astype(np.float32)
        enc_lens = np.array([feats_len], dtype=np.int32)
        fwd_pad = np.zeros((1, BEAM_SIZE, tokens_len), dtype=np.int64)
        bwd_pad = np.zeros((1, BEAM_SIZE, tokens_len), dtype=np.int64)
        lens_sos = np.zeros((1, BEAM_SIZE), dtype=np.int32)
        scores = np.zeros((1, BEAM_SIZE), dtype=np.float32)
        feeds = [enc, enc_lens, fwd_pad, lens_sos, bwd_pad, scores]

        for _ in range(10):
            self.decoder.infer(feeds, "dymshape", custom_sizes=8)

        logger.debug('Decoder warmup done!')

    def preprocess(self, hyps):
        """Pad the hypotheses (and their reversals) and attach sos/eos markers.

        hyps: sequence of (score, token_ids) pairs.
        Returns (hyps_pad_sos_eos, r_hyps_pad_sos_eos), each [beam_size, L].
        """
        longest = max(len(item[1]) for item in hyps)
        logger.debug(f'max_hyps_len: {longest}')

        # Build forward and reversed token matrices, right-padded with IGNORE_ID.
        forward, backward = [], []
        for item in hyps:
            tokens = list(item[1])
            padding = [IGNORE_ID] * (longest - len(tokens))
            forward.append(tokens + padding)
            backward.append(tokens[::-1] + padding)

        fwd_pad, _ = add_sos_eos(np.array(forward), self.sos_id, self.eos_id, IGNORE_ID)
        bwd_pad, _ = add_sos_eos(np.array(backward), self.sos_id, self.eos_id, IGNORE_ID)
        return fwd_pad, bwd_pad

    def infer(self, hyps, encoder_outs):
        """Rescore the beam and return the transcript of the winning hypothesis.

        hyps: sequence of (ctc_score, token_ids) pairs.
        encoder_outs: encoder features, shape [1, T, OUTPUT_SIZE]; truncated to
        MAX_SEQ_LEN frames when longer.
        """
        logger.debug(f'encoder_features shape: {encoder_outs.shape}')
        if encoder_outs.shape[1] > MAX_SEQ_LEN:
            logger.warning(f'The feature sequence (encoder_outs.shape[1]) {encoder_outs.shape[1]} '
                           f'is longer than the maximum length {MAX_SEQ_LEN}, thus truncated.')
            encoder_outs = encoder_outs[:, :MAX_SEQ_LEN, :]

        fwd_pad, bwd_pad = self.preprocess(hyps)
        beam_scores = np.array([item[0] for item in hyps])  # [beam_size,]
        # NOTE(review): every row of fwd_pad has the same padded length, so all
        # entries here are identical; confirm whether the model expects the true
        # per-hypothesis token counts instead.
        beam_lens = np.array([len(row) for row in fwd_pad])

        # Add the batch axis and cast to the dtypes the decoder expects.
        enc_lens = np.array([encoder_outs.shape[1]], dtype=np.int32)
        feeds = [
            encoder_outs,
            enc_lens,
            np.expand_dims(fwd_pad, axis=0),
            np.expand_dims(beam_lens, axis=0).astype(np.int32),
            np.expand_dims(bwd_pad, axis=0),
            np.expand_dims(beam_scores, axis=0).astype(np.float32),
        ]

        start = time.time()
        best_idx = self.decoder.infer(feeds, "dymshape", custom_sizes=8)[0][0]
        logger.debug(f'Decoder infer time: {(time.time() - start) * 1000:.2f} ms')

        text = self.postprocess(hyps, best_idx)
        logger.debug(f'Decoder output: {text}')
        return text

    def postprocess(self, hyps, idx=0):
        """Map hypothesis idx's token ids to text, dropping blanks and repeats."""
        kept = remove_duplicates_and_blank(hyps[idx][1])
        return ''.join(self.vocab[kept])
