# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
from typing import List, Tuple

import numpy as np
import torchaudio.compliance.kaldi as kaldi
import yaml


def add_sos_eos(ys_pad: np.ndarray, sos: int, eos: int,
                ignore_id: int) -> Tuple[np.ndarray, np.ndarray]:
    """Prepend <sos> and append <eos> to each padded target sequence.

    Args:
        ys_pad (np.ndarray): batch of padded target sequences (B, Lmax)
        sos (int): index of <sos>
        eos (int): index of <eos>
        ignore_id (int): index of padding

    Returns:
        ys_in (np.ndarray): (B, Lmax + 1), rows start with <sos>, padded
            with <eos>.
        ys_out (np.ndarray): (B, Lmax + 1), rows end with <eos>, padded
            with ignore_id.

    Examples:
        >>> sos_id = 10
        >>> eos_id = 11
        >>> ignore_id = -1
        >>> ys_pad
        array([[ 1,  2,  3,  4,  5],
               [ 4,  5,  6, -1, -1],
               [ 7,  8,  9, -1, -1]], dtype=int32)
        >>> ys_in, ys_out = add_sos_eos(ys_pad, sos_id, eos_id, ignore_id)
        >>> ys_in
        array([[10,  1,  2,  3,  4,  5],
               [10,  4,  5,  6, 11, 11],
               [10,  7,  8,  9, 11, 11]])
        >>> ys_out
        array([[ 1,  2,  3,  4,  5, 11],
               [ 4,  5,  6, 11, -1, -1],
               [ 7,  8,  9, 11, -1, -1]])
    """
    sos_tok = np.array([sos])
    eos_tok = np.array([eos])
    # Strip the padding values before attaching the special tokens.
    trimmed = [seq[seq != ignore_id] for seq in ys_pad]
    with_sos = [np.concatenate((sos_tok, seq), axis=0) for seq in trimmed]
    with_eos = [np.concatenate((seq, eos_tok), axis=0) for seq in trimmed]
    return pad_list(with_sos, eos), pad_list(with_eos, ignore_id)


def pad_list(xs: List[np.ndarray], pad_value: int):
    """Right-pad a list of 1-D arrays into one 2-D array.

    Args:
        xs (List): List of arrays [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
        pad_value (float): Value for padding.

    Returns:
        array: Padded array (B, Tmax, `*`).

    Examples:
        >>> x = [np.ones(4), np.ones(2), np.ones(1)]
        >>> x
        [array([1., 1., 1., 1.]), array([1., 1.]), array([1.])]
        >>> pad_list(x, 0)
        array([[1., 1., 1., 1.],
               [1., 1., 0., 0.],
               [1., 0., 0., 0.]])

    """
    longest = max(seq.shape[0] for seq in xs)
    # zeros(...) + pad_value keeps numpy's dtype-promotion behavior for the
    # fill value (instead of forcing pad_value into the input dtype).
    padded = np.zeros((len(xs), longest), dtype=xs[0].dtype) + pad_value
    for row, seq in enumerate(xs):
        padded[row, :seq.shape[0]] = seq
    return padded


def remove_duplicates_and_blank(token_idx_list):
    """Collapse consecutive repeated tokens and drop blanks (id 0).

    Standard CTC-style post-processing: each run of identical token ids is
    reduced to a single token, and blank tokens are removed entirely.
    """
    blank_id = 0
    collapsed = []
    previous = None
    for token in token_idx_list:
        # Keep a token only when it starts a new run and is not a blank.
        if token != previous and token != blank_id:
            collapsed.append(token)
        previous = token
    return collapsed


def compute_fbank(waveform,
                  sample_rate,
                  num_mel_bins=80,
                  frame_length=25,
                  frame_shift=10,
                  dither=0.0):
    """Extract a Kaldi-style filterbank feature matrix.

    Args:
        waveform: audio samples; the 1 << 15 scaling suggests the input is
            presumably float in [-1, 1] and kaldi.fbank expects 16-bit
            integer range — TODO confirm against callers.
        sample_rate: sampling frequency in Hz.
        num_mel_bins: number of mel filterbank bins.
        frame_length: frame length in milliseconds.
        frame_shift: frame shift in milliseconds.
        dither: dithering constant (0.0 disables dithering).

    Returns:
        Feature matrix produced by torchaudio's kaldi.fbank.
    """
    # Scale up to the 16-bit sample range before feature extraction.
    scaled = waveform * (1 << 15)
    return kaldi.fbank(scaled,
                       num_mel_bins=num_mel_bins,
                       frame_length=frame_length,
                       frame_shift=frame_shift,
                       dither=dither,
                       energy_floor=0.0,
                       sample_frequency=sample_rate)


def load_vocab(txt_path):
    """Load a vocabulary from a text file.

    Each line must contain exactly two whitespace-separated fields:
    ``token id``. Only the token (first field) is kept; tokens are returned
    in file order.

    Args:
        txt_path: path to the vocabulary text file.

    Returns:
        np.ndarray: 1-D array of token strings.

    Raises:
        ValueError: if any line does not have exactly two fields.
    """
    vocabulary = []
    len_of_valid_format = 2
    # Vocab files commonly contain non-ASCII tokens; force UTF-8 instead of
    # relying on the platform-dependent default encoding.
    with open(txt_path, 'r', encoding='utf-8') as fin:
        for line in fin:
            arr = line.strip().split()
            # Format for each line: token id
            if len(arr) != len_of_valid_format:
                raise ValueError(f"Invalid line: {line}. Expect format: token id")
            vocabulary.append(arr[0])
    return np.array(vocabulary)


def normalize(audio_samples, target_db=-20, max_gain_db=300.0):
    """Normalize audio samples to the target RMS level in decibels.

    :param audio_samples: Audio samples to normalize.
    :param target_db: Target RMS value in decibels. This value should be
                      less than 0.0 as 0.0 is full-scale audio.
    :type target_db: float
    :param max_gain_db: Max amount of gain in dB that can be applied for
                        normalization. This is to prevent nans when
                        attempting to normalize a signal consisting of
                        all zeros.
    :type max_gain_db: float
    :raises ValueError: If the required gain to normalize the segment to
                        the target_db value exceeds max_gain_db.
    """
    mean_square = np.mean(audio_samples ** 2)
    if mean_square == 0:
        # A silent (all-zero) signal has no finite RMS; return it unchanged.
        # Checking before log10 also avoids a divide-by-zero RuntimeWarning.
        return audio_samples

    # RMS in dB: factor 10 instead of 20 because the square root of the
    # mean square is folded into the logarithm.
    rms_db = 10 * np.log10(mean_square)

    gain = target_db - rms_db
    if gain > max_gain_db:
        raise ValueError(f'Unable to normalize the audio samples to target_db: {target_db}')

    # Apply the dB gain as a linear amplitude factor. The raise above already
    # guarantees gain <= max_gain_db, so no extra clamping is needed.
    return audio_samples * 10.0 ** (gain / 20.0)


def load_yaml(path):
    """Read a YAML config file and return the parsed object (usually a dict).

    Args:
        path: path to the YAML file.

    Returns:
        The deserialized YAML content.
    """
    # Force UTF-8 rather than the platform-dependent default encoding;
    # safe_load avoids arbitrary object construction from untrusted files.
    with open(path, 'r', encoding='utf-8') as f:
        cfg = yaml.safe_load(f)
    return cfg


def check_config(cfg):
    """Validate the ASR engine configuration dictionary.

    Every required key must be present and valid: model/vocab entries must
    point at existing filesystem paths, device_id must be an int, and the
    VAD timing/threshold entries must be floats.

    Raises:
        ValueError: on the first missing or invalid entry, in the same
            order the keys are listed below.
    """
    def existing_path(value):
        return os.path.exists(value)

    # (key, validator) pairs, checked in order; the first failure raises.
    checks = (
        ('encoder_model_path', existing_path),
        ('decoder_model_path', existing_path),
        ('punc_model_path', existing_path),
        ('punc_vocab', existing_path),
        ('punc_tokenizer', existing_path),
        ('asr_vocab', existing_path),
        ('device_id', lambda value: isinstance(value, int)),
        ('vad_model_path', existing_path),
        ('silence_standby_time', lambda value: isinstance(value, float)),
        ('vad_start_threshold', lambda value: isinstance(value, float)),
        ('vad_end_threshold', lambda value: isinstance(value, float)),
    )
    for key, is_valid in checks:
        value = cfg.get(key)
        if value is None or not is_valid(value):
            raise ValueError(f'[ASR module] Invalid "{key}": {value}')


class SingletonLogger:
    """Process-wide singleton that owns the 'ASR engine' logger.

    Every SingletonLogger() call returns the same instance, and the stream
    handler is attached at most once.
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            # object.__new__ must not receive extra arguments; forwarding
            # *args/**kwargs to it raises TypeError when any are passed.
            cls._instance = super(SingletonLogger, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        self.logger = logging.getLogger('ASR engine')
        # __init__ runs on every SingletonLogger() call even though __new__
        # returns the cached instance. Guard the handler setup so repeated
        # construction does not attach duplicate handlers (which would emit
        # every log line multiple times).
        if not self.logger.handlers:
            handler = logging.StreamHandler()
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)
            self.logger.setLevel(logging.INFO)


# Module-level logger shared by importers of this module; constructing the
# singleton here wires up its handler once at import time.
logger = SingletonLogger().logger
