#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright 2022 Moonzoo (Taylor Guo)
# 
#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)

# https://github.com/Masao-Someki/espnet_onnx
# onnx converter
# from curses import noecho
# from pyexpat import features
from espnet2.bin.asr_inference import Speech2Text
from espnet_onnx.export import ASRModelExport

import torch

# def convert_espnet_onnx(model_zip=None, tag_name=None):
#     model_zip = "asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp_valid.acc.ave.zip"
#     tag_name = "conformer"
#     onnx_export = ASRModelExport()
#     onnx_export.export_from_zip(model_zip, tag_name=tag_name, quantize=False)

def convert_espnet_onnx(model_zip=None, tag_name=None):
    """Export a trained ESPnet ASR model (zip archive) to ONNX.

    Args:
        model_zip: Path to the ESPnet model zip archive. Defaults to the
            local conformer checkpoint path when None.
        tag_name: Cache tag for the exported model (stored under
            ``~/.cache/espnet_onnx/<tag_name>``). Defaults to "conformer".
    """
    # Bug fix: the original overwrote both parameters unconditionally, so
    # caller-supplied values were silently ignored.  Fill them in only when
    # the caller did not provide them.
    if model_zip is None:
        model_zip = "/home/gyf/pkg/xxgg/github/ai_app/cmcc/docs/cmcc_2022/conformer_espnet/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp_valid.acc.ave.zip"
    if tag_name is None:
        tag_name = "conformer"
    onnx_export = ASRModelExport()
    # Raise the exporter's maximum input sequence length for this model.
    onnx_export.set_export_config(max_seq_len=2048)
    onnx_export.export_from_zip(model_zip, tag_name=tag_name, quantize=False)

convert_espnet_onnx()


# espnet_onnx inference
# NOTE(review): this import re-binds `Speech2Text`, shadowing the espnet2
# `Speech2Text` imported at the top of the file — intentional, since only the
# ONNX runtime wrapper is used from here on.
import soundfile, librosa
from espnet_onnx import Speech2Text

# Test utterance (presumably an AISHELL-1 sample), relative to the CWD.
wav_file = "wav/BAC009S0764W0121.wav"
# wav_file = "/home/gyf/pkg/xxgg/github/ai_app/cmcc/asr_conformer/speech_recognition/wav/BAC009S0764W0121.wav"
# onnx_dir = "C:/Users/taylorguo/.cache/espnet_onnx/conformer"
# Directory where convert_espnet_onnx() cached the exported model files.
onnx_dir = "/home/gyf/.cache/espnet_onnx/conformer"
# onnx_dir = "C:\\Users\\taylorguo\\.cache\\espnet_onnx\\conformer"

def inference_espnet_onnx(wav_file, onnx_dir):
    """Decode one wav file with the exported espnet_onnx model.

    Args:
        wav_file: Path to the input wav; librosa resamples it to 16 kHz.
        onnx_dir: Exported model directory.  NOTE(review): currently unused —
            the model is located by cache tag "conformer"; pass
            ``model_dir=onnx_dir`` to ``Speech2Text`` to honor it instead.

    Returns:
        The 1-best decoding result (first element of the first hypothesis),
        which is also printed.  Previously the result was printed and
        discarded; returning it makes the function reusable.
    """
    speech, _sr = librosa.load(wav_file, sr=16000)
    # y, _ = soundfile.read(wav_file)
    # speech2text = Speech2Text(model_dir=onnx_dir)
    speech2text = Speech2Text(tag_name="conformer")
    nbest = speech2text(speech)
    best = nbest[0][0]
    print(best)
    return best

inference_espnet_onnx(wav_file, onnx_dir)

# onnxruntime inference

import onnxruntime
import numpy as np


def mask_fill(arr, mask, mask_value):
    """Set entries of ``arr`` selected by ``mask`` to ``mask_value``, in place.

    Args:
        arr: numpy array to modify (mutated in place and also returned).
        mask: array broadcast-compatible with ``arr``; nonzero entries mark
            the positions to overwrite.
        mask_value: value written at the masked positions.

    Returns:
        The (mutated) input array.
    """
    # Bug fix: the original used ``==`` (a discarded comparison) instead of
    # ``=``, so the mask was never applied.  ``np.bool`` is also removed in
    # modern NumPy; the builtin ``bool`` is the supported spelling.
    arr[mask.astype(bool)] = mask_value
    return arr

def make_pad_mask(lengths, xs=None, dim=-1, xs_shape=None):
    """Build a padding mask: 1 marks padded positions, 0 marks valid ones.

    Args:
        lengths: per-example valid lengths (sequence of ints).
        xs: optional reference array; when given, the mask takes its shape.
            Otherwise the mask is ``(len(lengths), max(lengths))``.
        dim: when the mask is 3-D and ``dim == 1``, padding is marked along
            axis 1 instead of the trailing axis.
        xs_shape: unused; kept for interface compatibility.

    Returns:
        Float array of zeros with ones written past each example's length.
    """
    shape = xs.shape if xs is not None else (len(lengths), max(lengths))
    mask = np.zeros(shape)

    # For (B, T, D) masks filled along T (dim == 1), swap the last two axes
    # so the write below always targets the trailing axis, then swap back.
    swap_axes = len(mask.shape) == 3 and dim == 1
    if swap_axes:
        mask = mask.transpose(0, 2, 1)

    for row in range(len(mask)):
        mask[row][..., lengths[row]:] = 1

    if swap_axes:
        mask = mask.transpose(0, 2, 1)

    return mask


def global_mvn(
    features,
    features_len,
    stats_file="/home/gyf/pkg/xxgg/github/ai_app/cmcc/docs/cmcc_2022/conformer_espnet/exp/asr_stats_raw_sp/train/feats_stats.npz",
):
    """Apply global mean and variance normalization using precomputed stats.

    Args:
        features: (B, T, D) feature array, zero-padded along T — TODO confirm.
        features_len: (B,) valid frame counts per example.
        stats_file: Path to the feature-statistics file.  Previously this was
            hard-coded inside the function (an earlier Windows path was dead
            code, immediately overwritten); it is now a parameter whose
            default preserves the original effective behavior.

    Returns:
        Tuple of (normalized features, features_len).
    """
    stats = np.load(stats_file)
    eps = 1.0e-20  # variance floor to avoid division by zero

    if isinstance(stats, np.ndarray):
        # Kaldi-like stats: row 0 holds sums with the frame count in the
        # last column; row 1 holds sums of squares.
        count = stats[0].flatten()[-1]
        mean = stats[0, :-1] / count
        var = stats[1, :-1] / count - mean * mean
    else:
        # New style: npz file with explicit count / sum / sum_square arrays.
        count = stats["count"]
        sum_v = stats["sum"]
        sum_square_v = stats["sum_square"]
        mean = sum_v / count
        var = sum_square_v / count - mean * mean
    std = np.sqrt(np.maximum(var, eps))

    # Subtract the mean, re-zero the padded frames, then scale by std.
    mask = make_pad_mask(features_len, features, 1)
    features -= mean
    features = mask_fill(features, mask, 0.0)
    features /= std

    return features, features_len


def utterance_mvn(features, features_len):
    """Apply per-utterance mean (and optionally variance) normalization.

    Args:
        features: (B, T, D) feature array, assumed zero-padded along T.
        features_len: (B,) valid lengths; when None, all frames count.

    Returns:
        Tuple of (normalized features, features_len).
    """
    norm_means = True
    norm_vars = False
    eps = 1.0e-20

    if features_len is None:
        features_len = np.full([features.shape[0]], features.shape[1])
    # Reshape lengths to (B, 1, 1, ...) so they broadcast against features.
    ilens_ = features_len.reshape(-1, *[1 for _ in range(len(features.shape) - 1)])

    # Zero padding
    features = mask_fill(features, make_pad_mask(features_len, features, 1), 0.0)
    # mean: (B, 1, D)
    mean = features.sum(axis=1, keepdims=True) / ilens_

    if norm_means:
        features -= mean

        if norm_vars:
            var = np.power(features, 2).sum(axis=1, keepdims=True) / ilens_
            std = np.clip(np.sqrt(var), eps, None)
            # Bug fix: the original divided by np.sqrt(std) (i.e. var**0.25)
            # instead of std itself, inconsistent with the norm_means=False
            # branch below.  (Dead while norm_vars=False, but wrong.)
            features = features / std
        return features, features_len
    else:
        if norm_vars:
            y = features - mean
            y = mask_fill(y, make_pad_mask(features_len, y, 1), 0.0)
            var = np.power(y, 2).sum(axis=1, keepdims=True) / ilens_
            std = np.clip(np.sqrt(var), eps, None)
            features /= std
        return features, features_len


#####################
# config

# Encoder ONNX graph produced by convert_espnet_onnx() above.
encoder_model_path = "/home/gyf/.cache/espnet_onnx/conformer/full/xformer_encoder.onnx"
# encoder_model_path = "C:\\Users\\taylorguo\\.cache\\espnet_onnx\\conformer\\full\\xformer_encoder.onnx"
# encoder_model_path = "C:\\Users\\taylorguo\\Documents\\biren\\cmcc_model_2022\\conformer_encoder.onnx"
providers = ['CPUExecutionProvider'] # , 'CUDAExecutionProvider'
session_encoder = onnxruntime.InferenceSession(encoder_model_path, providers=providers)


# STFT settings; n_fft/hop_length/win_length mirror stft_kwargs further below.
onesided = False #True
normalized = False
center = True
input_lens = 1 ###  NOTE(review): appears unused below
n_fft = 512
hop_length=128
win_length = 512


# Feature-normalization switches: global MVN ("gmvn") by default.
do_normalize = True
normalize_type = "gmvn"
do_preencoder = False  # NOTE(review): appears unused below

# Mel filterbank frequency range; log_base=None selects the natural log.
fmin = 0
fmax = 8000.0
log_base = None # null

#######################
# 1. Extract feature

# NOTE(review): `input` shadows the builtin of the same name for the rest
# of the script.
input, sr = librosa.load(str(wav_file), sr=16000)
# input, _ = soundfile.read(wav_file)
print(" ****** librosa load:  ", type(input), input.shape)

#################

# check dtype
if input.dtype != np.float32:
    input = input.astype(np.float32)

# data: (Nsamples,) -> (1, Nsamples)
input = input[np.newaxis, :]
# lengths: (1,)
lengths = np.array([input.shape[1]]).astype(np.int64)
print(" ****** input:  ", type(input), input.shape)
################################################################
# Encoder
################################################################
# Encoder::Frontend::STFT  : input, input_lens
stft_kwargs = dict(n_fft=512, win_length=512, hop_length=128, center=True, window="hann")

# Run the STFT per batch item and stack real/imag parts into a trailing
# axis: each element becomes (Freq, Frames, 2=real_imag).
output = []
for i, instance in enumerate(input):
    stft = librosa.stft(input[i], **stft_kwargs)
    print(" ****** STFT  [stft.real, stft.imag]:  ", type(stft), stft.shape, stft.real.shape, stft.imag.shape)
    output.append(np.array(np.stack([stft.real, stft.imag], -1)))
output = np.vstack(output).reshape(len(output), *output[0].shape)
print(" ****** STFT - np.vstack :  ", type(output), output.shape)
# output = torch.Tensor(output)
# Rebuild the negative-frequency half from the one-sided spectrum: mirror
# bins 1..len_conj and negate the imaginary part (complex conjugate), then
# append, giving a two-sided spectrum (n_fft frequency bins total).
if not onesided:
    len_conj = 512 - output.shape[1]  # n_fft
    # conj = output[:, 1: 1 + len_conj].flip(1)
    conj = np.flip(output[:, 1: 1 + len_conj], axis=1)
    # conj[:, :, :, -1].data *= -1
    conj[:, :, :, -1] = conj[:, :, :, -1] * (-1)
    output = np.concatenate([output, conj], 1)

# NOTE(review): dead branch while normalized=False — stft_kwargs["window"]
# is the string "hann", so `.shape` would raise AttributeError if enabled.
if normalized:
    output = output * (stft_kwargs["window"].shape[0] ** (-0.5))
print(" ****** STFT - normalize:  ", type(output), output.shape)
# output: (Batch, Freq, Frames, 2=real_imag)
# -> (Batch, Frames, Freq, 2=real_imag)
output = output.transpose(0, 2, 1, 3)

# Frame bookkeeping: with center=True librosa pads n_fft//2 samples on each
# side, so the effective length grows by 2*pad before the frame-count formula.
if lengths is not None:
    if center :
        pad = n_fft // 2
        lengths = lengths + 2 * pad
    output_lens = (lengths - n_fft) // hop_length + 1
    # output_lens = np.array([output_lens]).astype(np.int64)
    output_stft = mask_fill(output, make_pad_mask(output_lens, output, dim=1), 0.0)
else:
    output_lens = None

print(" ****** STFT pad_mask:  ", type(output_lens), output_lens, output_stft.shape)
# STFT End -> output_stft, output_lens
# STFT End -> ouput_stft, output_lens
################################################################
################################################################
#####################
# 
# 1.3. STFT -> Power spectrum
# h: ComplexTensor(B, T, F) -> torch.Tensor(B, T, F)
# |STFT|^2 = real^2 + imag^2
input_power = output_stft[..., 0]**2 + output_stft[..., 1]**2

##########################################################
# Log Mel:  input_power, output_lens
# convert STFT to fbank features
# 1.4. Feature transform e.g. Stft -> Log-Mel-Fbank
# input_power: (Batch, [Channel,] Length, Freq)
#       -> input_feats: (Batch, Length, Dim)
# NOTE(review): n_fft=1022 yields 1022//2 + 1 = 512 filterbank input bins,
# matching the 512 two-sided STFT frequency bins built above — confirm
# this mismatch with the STFT n_fft=512 is intentional.
mel_kwargs = dict(sr=16000, n_fft=1022, n_mels=80, fmin=0, fmax=8000.0, htk=False)
melmat = librosa.filters.mel(**mel_kwargs)

# feat: (B, T, D1) x melmat: (D1, D2) -> mel_feat: (B, T, D2)
print(input_power.shape, melmat.T.shape)
mel_feat = np.matmul(input_power, melmat.T)
# Floor at 1e-10 so the log below never sees zero.
mel_feat = np.clip(mel_feat, 1e-10, float("inf"))

# log_base=None -> natural log (re-assignment shadows the config value above).
log_base = None
if log_base is None:
    logmel_feat = np.log(mel_feat)
elif log_base == 2.0:
    logmel_feat = np.log2(mel_feat)
elif log_base == 10.0:
    logmel_feat = np.log10(mel_feat)
else:
    logmel_feat = np.log(mel_feat) / np.log(log_base)

# zero padding
# output_lens = np.array([output_lens]).astype(np.int64)
logmel_feat = mask_fill(logmel_feat, make_pad_mask(output_lens, logmel_feat, 1), 0.0)
print(" ****** zero_padding:  ", type(output_lens), output_lens, logmel_feat.shape)
#############################################################
# 2. normalize features with global MVN
# NOTE(review): if do_normalize were False (or normalize_type unmatched),
# `feats`/`feat_length` would be undefined at the session run below.
if do_normalize:
    if normalize_type == "gmvn":
        feats, feat_length = global_mvn(logmel_feat, output_lens)
    elif normalize_type == "utterance_mvn":
        feats, feat_length = utterance_mvn(logmel_feat, output_lens)
print(" ****** gmvn:  ", type(feat_length), feat_length, feats.shape)
######################
# 3. encoder inference 

# encoder_kwargs 
encoder_out, encoder_out_lens = session_encoder.run(["encoder_out", "encoder_out_lens"], {"feats": feats, "feats_length": feat_length})
# encoder_out, encoder_out_lens = session_encoder.run(["encoder_out", "encoder_out_lens"], {"feats": feats})

print(encoder_out)
print(encoder_out_lens)
