from typing import Optional, Tuple, List
from typeguard import check_argument_types
from pathlib import Path
import numpy as np
import librosa
import onnxruntime # 1.12.1
import logging
import os

# module_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), "utils", "function.py")
# sys.path.append(module_file)

from .function import make_pad_mask, mask_fill

class Encoder:
    """Conformer ASR encoder driven by an exported ONNX model.

    Pipeline: wav file -> STFT -> power spectrum -> log-Mel filterbank ->
    global mean/variance normalization (CMVN) -> ONNX conformer encoder.
    """

    def __init__(self, model_dir: str) -> None:
        """
        Args:
            model_dir: directory containing ``feats_stats.npz`` and
                ``full/conformer_encoder.onnx``.
        """
        self.model_dir = model_dir
        # Lazy caches: the ONNX session and CMVN statistics are expensive to
        # build/parse, so they are created on first use and reused afterwards.
        self._encoder_session = None
        self._mvn_mean = None
        self._mvn_std = None

    def __call__(self, wav_file: str) -> Tuple[np.ndarray, np.ndarray]:
        """Run the full front-end + encoder on a single wav file.

        Args:
            wav_file: path to an audio file (loaded as 16 kHz mono).
        Returns:
            (encoder_out, encoder_out_lens) for a batch of size 1.
        """
        assert check_argument_types()
        speech, _sr = librosa.load(str(wav_file), sr=16000)
        logging.debug(" ###### speech: %s", speech.shape)

        # The ONNX model expects float32 features.
        if speech.dtype != np.float32:
            speech = speech.astype(np.float32)

        # (Nsamples,) -> (1, Nsamples); lengths: (1,)
        speech = speech[np.newaxis, :]
        lengths = np.array([speech.shape[1]], dtype=np.int64)
        logging.debug(" ###### before Forward_Encoder: %s %s", speech.shape, lengths.shape)

        encoder_out, encoder_out_lens = self.encoder_main(speech=speech, speech_length=lengths)
        if isinstance(encoder_out, tuple):
            encoder_out = encoder_out[0]
        # This entry point only supports batch size 1.
        assert len(encoder_out) == 1, len(encoder_out)

        return encoder_out, encoder_out_lens

    def frontend_stft(self, input: np.ndarray, ilens: np.ndarray = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """STFT forward function.

        Args:
            input: (Batch, Nsamples) waveform batch
            ilens: (Batch,) valid sample counts, or None
        Returns:
            output: (Batch, Frames, Freq, 2=real_imag)
            olens: (Batch,) valid frame counts, or None if ilens was None
        """
        assert check_argument_types()
        n_fft, win_length, hop_length = 512, 512, 128
        center, onesided, normalized = True, True, False
        stft_kwargs = dict(
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
            center=center,
            window="hann",
        )

        # STFT each instance in the batch; stack real/imag as the last axis.
        spectra = [
            np.stack([stft.real, stft.imag], -1)
            for stft in (librosa.stft(wav, **stft_kwargs) for wav in input)
        ]
        output = np.stack(spectra, 0)  # (Batch, Freq, Frames, 2)

        if not onesided:
            # Reconstruct the conjugate-symmetric negative-frequency bins.
            len_conj = n_fft - output.shape[1]
            conj = np.flip(output[:, 1:1 + len_conj], axis=1).copy()
            conj[..., -1] *= -1  # conjugation: negate the imaginary part
            output = np.concatenate([output, conj], 1)
        if normalized:
            # Matches torch.stft(normalized=True): scale by frame_length**-0.5.
            output = output * (win_length ** (-0.5))

        # (Batch, Freq, Frames, 2) -> (Batch, Frames, Freq, 2)
        output = output.transpose(0, 2, 1, 3)
        if ilens is not None:
            if center:
                # librosa pads n_fft // 2 samples on both sides when center=True.
                ilens = ilens + 2 * (n_fft // 2)
            olens = (ilens - n_fft) // hop_length + 1
            # Zero out frames beyond each instance's valid length.
            output = mask_fill(output, make_pad_mask(olens, output, dim=1), 0.0)
        else:
            olens = None
        return output, olens

    def frontend_logmel(self, feat: np.ndarray, ilens: np.ndarray = None) -> Tuple[np.ndarray, np.ndarray]:
        """Power spectrum -> log-Mel filterbank features.

        Args:
            feat: (Batch, Frames, Freq) power spectrum
            ilens: (Batch,) valid frame counts
        Returns:
            logmel_feat: (Batch, Frames, n_mels=80), and ilens unchanged
        """
        melmat = librosa.filters.mel(
            sr=16000, n_fft=512, n_mels=80, fmin=0, fmax=8000.0, htk=False
        )
        # feat: (B, T, D1) x melmat.T: (D1, D2) -> mel_feat: (B, T, D2)
        mel_feat = np.matmul(feat, melmat.T)
        # Floor at 1e-10 so log never sees zero energy.
        logmel_feat = np.log(np.maximum(mel_feat, 1e-10))

        # Zero out padded frames.
        logmel_feat = mask_fill(
            logmel_feat, make_pad_mask(ilens, logmel_feat, 1), 0.0)
        return logmel_feat, ilens

    def frontend_main(self, inputs: np.ndarray, input_length: np.ndarray):
        """Waveform batch -> log-Mel features (STFT -> power -> log-Mel).

        Args:
            inputs: (Batch, Nsamples)
            input_length: (Batch,)
        Returns:
            input_feats: (Batch, Frames, 80), feats_lens: (Batch,)
        """
        assert check_argument_types()
        input_stft, feats_lens = self.frontend_stft(inputs, input_length)
        logging.debug(" ###### after stft : %s %s", inputs.shape, input_length.shape)

        # STFT (real, imag) pairs -> power spectrum (B, T, F).
        input_power = input_stft[..., 0] ** 2 + input_stft[..., 1] ** 2
        logging.debug(" ###### after stft input_power : %s %s", input_power.shape, input_length.shape)

        input_feats, _ = self.frontend_logmel(input_power, feats_lens)
        logging.debug(" ###### after logmel : %s %s", input_feats.shape, input_length.shape)
        return input_feats, feats_lens

    def global_mvn(self, x: np.ndarray, ilens: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Global mean/variance normalization with precomputed statistics.

        Args:
            x: (B, L, ...) features
            ilens: (B,) valid frame counts
        Returns:
            normalized features and ilens unchanged
        """
        if self._mvn_mean is None:
            # Parse the stats file once and cache mean/std.
            stats = np.load(Path(self.model_dir) / "feats_stats.npz")
            eps = 1.0e-20
            if isinstance(stats, np.ndarray):
                # Kaldi-like stats matrix: last column of row 0 is the count.
                count = stats[0].flatten()[-1]
                mean = stats[0, :-1] / count
                var = stats[1, :-1] / count - mean * mean
            else:
                # New-style npz with explicit accumulators.
                count = stats["count"]
                mean = stats["sum"] / count
                var = stats["sum_square"] / count - mean * mean
            self._mvn_mean = mean
            self._mvn_std = np.sqrt(np.maximum(var, eps))

        mask = make_pad_mask(ilens, x, 1)
        # Non-in-place arithmetic so the caller's array is not mutated.
        x = x - self._mvn_mean
        # Re-zero padded frames after the mean shift (division keeps zeros).
        x = mask_fill(x, mask, 0.0)
        x = x / self._mvn_std
        return x, ilens

    def encoder_main(self, speech: np.ndarray, speech_length: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Frontend + Encoder. Note that this method is used by asr_inference.py

        Args:
            speech: (Batch, Nsamples)
            speech_length: (Batch,)
        Returns:
            (encoder_out, encoder_out_lens) produced by the ONNX model.
        """
        if self._encoder_session is None:
            model_path = os.path.join(self.model_dir, "full", "conformer_encoder.onnx")
            # NOTE(review): assumes a CUDA-capable onnxruntime build; add
            # "CPUExecutionProvider" as a fallback if deployment may lack a GPU.
            providers = ["CUDAExecutionProvider"]
            self._encoder_session = onnxruntime.InferenceSession(model_path, providers=providers)
        encoder = self._encoder_session

        # 1. Extract log-Mel features.
        feats, feat_length = self.frontend_main(speech, speech_length)
        logging.debug(" ###### before gmvn: %s %s", feats.shape, feat_length.shape)

        # 2. Normalize with global MVN.
        feats, feat_length = self.global_mvn(feats, feat_length)
        logging.debug(" ###### after gmvn, before forward_encoder : %s %s", feats.shape, feat_length.shape)

        # 3. Forward the ONNX encoder.
        logging.debug(encoder.get_inputs()[0].name)
        encoder_out, encoder_out_lens = encoder.run(
            ["encoder_out", "encoder_out_lens"],
            {"feats": feats, "feats_length": feat_length},
        )
        logging.debug("%s %s", encoder_out, encoder_out_lens)
        return encoder_out, encoder_out_lens
