# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time

import numpy as np
from ais_bench.infer.interface import InferSession
from swig_decoders import ctc_beam_search_decoder_batch, TrieVector, PathTrie

from constants import *
from utils import remove_duplicates_and_blank, load_vocab, logger

# Initialize "batch_root" and "trie_root" as global variables
# to adapt to the "ctc_beam_search_decoder_batch" function, which is a Python wrapper of C++ code.
# Both are (re)built by OnlineEncoder.init_trie() at the start of every utterance;
# presumably the C++ decoder mutates the trie in place so beam-search state
# persists across successive chunk inferences -- confirm against swig_decoders.
batch_root = None
trie_root = None


class OnlineEncoder:
    """Streaming (chunk-by-chunk) ASR encoder front-end.

    Wraps an ais_bench ``InferSession``: feature chunks are buffered,
    cut into fixed-size windows of ``DECODING_WINDOW`` frames, run through
    the encoder model while attention/CNN caches are carried between
    windows, and the per-window CTC output is decoded incrementally with
    a prefix beam search whose trie state lives in the module-level
    ``batch_root``/``trie_root`` globals.
    """

    def __init__(self, cfg):
        """Build the session and reset all streaming state.

        ``cfg`` is a mapping (accessed via ``.get``); keys read here:
        ``device_id``, ``encoder_model_path``, ``asr_vocab``,
        ``silence_num_blank``.
        """
        self.encoder = InferSession(cfg.get('device_id'), cfg.get('encoder_model_path'))
        self.vocab = load_vocab(cfg.get('asr_vocab'))
        # State must exist before warmup(): warmup feeds the freshly
        # initialized caches to the model.
        self.init_encoder_state()
        self.warmup()
        # NOTE(review): stored but not used in this class -- presumably
        # consumed by a caller for silence detection; confirm.
        self.silence_num_blank = cfg.get('silence_num_blank')

    def warmup(self):
        """Run 10 inferences on random input, discarding the results.

        Presumably amortizes first-inference overhead (graph/stream
        setup) so later timed runs are representative -- TODO confirm.
        Outputs are ignored, so the cached encoder state set up by
        init_encoder_state() is left untouched.
        """
        chunk_xs = np.random.randn(1, DECODING_WINDOW, FEATURE_DIM).astype(np.float32)
        for _ in range(10):
            self.encoder.infer([chunk_xs,
                                self.chunk_lens, self.offset, self.att_cache, self.cnn_cache, self.cache_mask])
        logger.debug('Encoder warmup done!')

    @staticmethod
    def init_trie():
        """Re-create the global beam-search trie for a new utterance.

        The globals are required by the C++-backed
        ``ctc_beam_search_decoder_batch`` (see module-level comment);
        rebuilding them discards any prefixes decoded so far.
        """
        global batch_root, trie_root
        batch_root = TrieVector()
        trie_root = PathTrie()
        batch_root.push_back(trie_root)

    def init_encoder_state(self):
        """Reset every piece of streaming state for a fresh utterance."""
        # Running frame offset fed back to the model each window.
        self.offset = np.zeros((1, 1), dtype=np.int64)
        # Per-layer attention key/value cache carried between windows.
        self.att_cache = np.zeros((1, NUM_LAYERS, NUM_HEADS, REQUIRED_CACHE_SIZE, OUTPUT_DIM),
                                  dtype=np.float32)
        # Per-layer convolution-module cache carried between windows.
        self.cnn_cache = np.zeros((1, NUM_LAYERS, OUTPUT_SIZE, CNN_MODULE_KERNEL), dtype=np.float32)
        # Validity mask over the attention cache positions.
        self.cache_mask = np.ones((1, 1, REQUIRED_CACHE_SIZE), dtype=np.float32)
        # Every window fed to the model is exactly DECODING_WINDOW frames
        # (short final windows are zero-padded in __call__).
        self.chunk_lens = np.ones(1, dtype=np.int32) * DECODING_WINDOW

        # states in one round of inference
        self.encoder_outs = []        # encoder outputs of each processed window
        self.cached_feat = None       # leftover feature frames awaiting a full window
        self.num_chunks = 0           # number of windows inferred so far
        self.init_trie()

    def reset(self):
        """Clear all streaming state so a new utterance can be decoded."""
        self.init_encoder_state()

    def __call__(self, chunk, is_end=False):
        """Feed a feature chunk; return current beam-search hypotheses.

        ``chunk`` is a 2-D feature array, assumed (num_frames, FEATURE_DIM)
        -- TODO confirm against the feature extractor. Frames are buffered
        until at least one full DECODING_WINDOW is available (or, when
        ``is_end`` is set, at least CONTEXT frames), then consumed window
        by window. Returns the hypotheses from the last processed window,
        or [] when nothing could be processed yet.
        """
        if self.cached_feat is None:
            self.cached_feat = chunk
        else:
            self.cached_feat = np.concatenate([self.cached_feat, chunk], axis=0)

        num_frames = self.cached_feat.shape[0]
        if num_frames < DECODING_WINDOW and not is_end:
            return []
        # Even at end-of-speech, fewer than CONTEXT frames cannot form a
        # valid window; they are dropped from decoding.
        if num_frames < CONTEXT:
            return []

        # Finish the remaining frames if this is the end of the speech.
        # At end-of-speech a window only needs CONTEXT real frames (the
        # rest is zero-padded below); mid-stream we wait for a full window.
        if is_end:
            left_frames = CONTEXT
        else:
            left_frames = DECODING_WINDOW

        end = 0
        hyps = []
        # Slide a DECODING_WINDOW-sized window over the buffered frames;
        # the guards above guarantee at least one iteration here.
        for cur in range(0, num_frames - left_frames + 1, DECODING_WINDOW):
            end = min(cur + DECODING_WINDOW, num_frames)
            x_chunk = self.cached_feat[cur:end, :]

            # padding if needed
            if end - cur < DECODING_WINDOW:
                num_pad_frames = DECODING_WINDOW - (end - cur)
                logger.debug(f'padding {num_pad_frames} frames')
                x_chunk = np.pad(x_chunk, ((0, num_pad_frames), (0, 0)), 'constant',
                                 constant_values=0)

            # hyps from the last window win; the beam-search trie carries
            # state across windows, so they cover the utterance so far.
            hyps = self.infer(x_chunk)

        # Keep only the frames not yet consumed for the next call.
        self.cached_feat = self.cached_feat[end:, :]
        return hyps

    def infer(self, chunk):
        """Run one encoder window and one incremental beam-search step.

        ``chunk`` is a single (DECODING_WINDOW, FEATURE_DIM) window; it is
        batched to size 1 before inference. The model returns 8 outputs:
        CTC log-probs, their top-k indices, the encoder output, its
        length, and the four updated streaming caches, which replace the
        ones fed into the next call. Returns the beam-search hypotheses
        (score, token-id sequence pairs) for batch element 0.
        """
        chunk_xs = np.expand_dims(chunk, axis=0)
        start = time.time()
        output = self.encoder.infer([chunk_xs, self.chunk_lens, self.offset, self.att_cache, self.cnn_cache,
                                     self.cache_mask])
        logger.debug(f'Encoder infer time: {(time.time() - start) * 1000:.2f} ms')
        log_probs, log_probs_idx, chunk_out, chunk_out_lens, \
            self.offset, self.att_cache, self.cnn_cache, self.cache_mask = output

        self.encoder_outs.append(chunk_out)
        self.num_chunks += 1
        logger.debug(f'current num_chunks: {self.num_chunks}')

        # NOTE(review): the start flag is True for every window -- the
        # exact semantics are defined inside swig_decoders; confirm.
        seq_start_flag = [True]
        hyps = ctc_beam_search_decoder_batch(log_probs.tolist(), log_probs_idx.tolist(), batch_root, seq_start_flag,
                                             BEAM_SIZE, NUM_BEAM_SEARCH_PROCESSES, BLANK_ID, SPACE_ID)[0]

        # Decoded text is logged for debugging only; hyps are returned raw.
        text = self.postprocess(hyps)
        logger.debug(f'Current decoding text: {text}')
        return hyps

    def postprocess(self, hyps):
        """Convert the best hypothesis into text.

        ``hyps[0][1]`` is the token-id sequence of the top beam; CTC
        repeats and blanks are collapsed, then ids are mapped to tokens.
        Assumes ``self.vocab`` supports fancy indexing with a list of ids
        (e.g. a numpy array) -- TODO confirm load_vocab's return type.
        """
        token_idx_list = remove_duplicates_and_blank(hyps[0][1])
        text = ''.join(self.vocab[token_idx_list])
        return text
