
from ast import arg
from base64 import encode
from json import decoder
from statistics import mode
from typing import Optional, Sequence, Union
import torch

import argparse
import yaml
from pathlib import Path

import soundfile
import logging
logging.basicConfig(level=logging.INFO)

# Experiment directory holding the trained ESPnet Conformer artifacts; other
# relative paths from the config (e.g. the MVN stats file) are joined onto it.
dir_path = "/home/gyf/pkg/xxgg/github/ai_app/cmcc/docs/cmcc_2022/conformer_espnet/"
# config_file = "/home/gyf/pkg/xxgg/github/ai_app/cmcc/docs/cmcc_2022/conformer_espnet/meta.yaml"
# Training config of the ASR (Conformer) experiment.
config_file = "/home/gyf/pkg/xxgg/github/ai_app/cmcc/docs/cmcc_2022/conformer_espnet/exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/config.yaml"
# config_file = "C:\\Users\\taylorguo\\Documents\\biren\\cmcc_model_2022\\conformer\\models\\exp\\asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp\\config.yaml"
config_file = Path(config_file)

# Averaged ASR checkpoint (average of the 10 best epochs by validation accuracy).
model_file = dir_path + "exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/valid.acc.ave_10best.pth"

# Transformer language-model config and its averaged checkpoint.
# NOTE(review): the LM is loaded via get_args() below but never wired into the
# beam search — see build_model_from_file.
config_file_lm = "/home/gyf/pkg/xxgg/github/ai_app/cmcc/docs/cmcc_2022/conformer_espnet/exp/lm_train_lm_transformer_char_batch_bins2000000/config.yaml"
config_file_lm = Path(config_file_lm)
model_file_lm = "/home/gyf/pkg/xxgg/github/ai_app/cmcc/docs/cmcc_2022/conformer_espnet/exp/lm_train_lm_transformer_char_batch_bins2000000/valid.loss.ave_10best.pth"

from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.asr.specaug.specaug import SpecAug
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
from espnet2.asr.decoder.transformer_decoder import TransformerDecoder
from espnet2.asr.ctc import CTC
from espnet2.asr.espnet_model import ESPnetASRModel

from espnet2.lm.transformer_lm import TransformerLM
from espnet2.lm.espnet_model import ESPnetLanguageModel

from espnet.nets.scorers.ctc import CTCPrefixScorer
from espnet.nets.scorers.length_bonus import LengthBonus
from espnet.nets.beam_search import BeamSearch
from espnet.nets.scorer_interface import BatchScorerInterface

from espnet.nets.batch_beam_search_online_sim import BatchBeamSearchOnlineSim
from espnet.nets.batch_beam_search import BatchBeamSearch

from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter

from espnet2.torch_utils.device_funcs import to_device



def preprocess_audio(audio_ndarray):
    """Convert a 1-D audio sample array into the batch dict fed to the encoder.

    Args:
        audio_ndarray: 1-D array-like of audio samples, shape (Nsamples,).

    Returns:
        dict with:
            "speech": float32 tensor of shape (1, Nsamples)
            "speech_lengths": int64 tensor of shape (1,) holding Nsamples
    """
    # (Nsamples,) -> (1, Nsamples); convert to float32 directly instead of the
    # original tensor-then-cast via getattr(torch, "float32").
    speech = torch.as_tensor(audio_ndarray, dtype=torch.float32).unsqueeze(0)
    # lengths: (1,) — number of samples of the single batch item
    lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
    # Lazy %-style args avoid string building when INFO is disabled.
    logging.info("speech length: %s", speech.size(1))
    return {"speech": speech, "speech_lengths": lengths}

####################
def get_args(config_file):
    """Parse a YAML experiment config into an argparse.Namespace.

    Args:
        config_file: pathlib.Path pointing at the YAML config file.

    Returns:
        argparse.Namespace whose attributes mirror the YAML top-level keys.
    """
    with config_file.open("r", encoding="utf-8") as stream:
        config = yaml.safe_load(stream)
    return argparse.Namespace(**config)

# get_args(config_file)


##############################
# ASR model
@torch.no_grad()
def build_model_asr(args_dict):
    """Assemble an ESPnetASRModel (frontend / specaug / MVN / Conformer / decoder / CTC).

    Args:
        args_dict: argparse.Namespace parsed from the training config YAML; must
            expose token_list, frontend_conf, specaug_conf, normalize_conf,
            encoder_conf, decoder_conf, ctc_conf and model_conf.

    Returns:
        ESPnetASRModel with freshly initialized weights — the caller is
        expected to load a checkpoint via load_state_dict().
    """
    vocab_size = len(args_dict.token_list)

    # Frontend: when fs is "16k" the defaults already match the config.
    # Otherwise the conf dict must be **-expanded into keyword arguments —
    # passing it positionally would bind the whole dict to the first
    # parameter instead of spreading the options.
    if args_dict.frontend_conf["fs"] == "16k":
        frontend = DefaultFrontend()
    else:  # TODO: to update parsing args_dict
        frontend = DefaultFrontend(**args_dict.frontend_conf)
    input_size = frontend.output_size()

    # SpecAugment for spectrogram augmentation (inactive in eval mode).
    specaug = SpecAug(**args_dict.specaug_conf)

    # Global mean-variance normalization. The stats file path in the config is
    # relative to the experiment directory; work on a copy so a second call
    # does not prepend dir_path twice onto the caller's namespace.
    normalize_conf = dict(args_dict.normalize_conf)
    normalize_conf["stats_file"] = dir_path + normalize_conf["stats_file"]
    normalize = GlobalMVN(**normalize_conf)

    # Conformer encoder and Transformer decoder.
    encoder = ConformerEncoder(input_size=input_size, **args_dict.encoder_conf)
    encoder_output_size = encoder.output_size()
    logging.info(encoder)
    decoder = TransformerDecoder(
        vocab_size=vocab_size,
        encoder_output_size=encoder_output_size,
        **args_dict.decoder_conf,
    )

    # CTC head over the encoder output.
    ctc = CTC(odim=vocab_size, encoder_output_size=encoder_output_size, **args_dict.ctc_conf)

    # Wire all components into the joint CTC/attention ASR model.
    model = ESPnetASRModel(
        vocab_size=vocab_size,
        frontend=frontend,
        specaug=specaug,
        normalize=normalize,
        preencoder=None,
        encoder=encoder,
        postencoder=None,
        decoder=decoder,
        ctc=ctc,
        joint_network=None,
        token_list=args_dict.token_list,
        **args_dict.model_conf,
    )
    return model


##############################
# Language model
def build_model_lm(args_dict):
    """Build an ESPnetLanguageModel wrapping a Transformer LM.

    Args:
        args_dict: argparse.Namespace parsed from the LM config YAML; must
            expose token_list, lm_conf and model_conf.

    Returns:
        ESPnetLanguageModel with freshly initialized weights.
    """
    vocab_size = len(args_dict.token_list)
    network = TransformerLM(vocab_size=vocab_size, **args_dict.lm_conf)
    return ESPnetLanguageModel(lm=network, vocab_size=vocab_size, **args_dict.model_conf)


##############################
# 
@torch.no_grad()
def build_model_from_file(audio_ndarray, args_dict_asr, model_file_asr, args_dict_lm, model_file_lm, device = "cpu", dtype="float32"):
    """Build the ASR model and beam-search decoder from files, then decode one utterance.

    Args:
        audio_ndarray: 1-D array of audio samples for a single utterance.
        args_dict_asr: Namespace parsed from the ASR training config.
        model_file_asr: path to the trained ASR checkpoint (.pth).
        args_dict_lm: Namespace parsed from the LM config.
            NOTE(review): currently unused — no LM scorer is registered below.
        model_file_lm: path to the trained LM checkpoint.
            NOTE(review): currently unused.
        device: "cpu" or "cuda" (expanded to the current CUDA device index).
        dtype: torch dtype name used for inference, e.g. "float32".

    Returns:
        list of (text, token, token_int, hyp) tuples, best hypothesis first.
    """
    # Decoding hyper-parameters.
    ctc_weight=0.5
    lm_weight=1.0
    ngram_weight=0.9
    penalty=0.0

    beam_size = 20

    batch_size = 1
    streaming = False

    if device == "cuda":
        device = f"cuda:{torch.cuda.current_device()}"

    scores = {}

    # Build the acoustic model and load the trained weights.
    model_asr = build_model_asr(args_dict_asr)
    model_asr.to(device)
    model_asr.load_state_dict(torch.load(model_file_asr, map_location=device))

    print(model_asr.ctc)
    print(getattr(torch, dtype))
    model_asr.to(dtype=getattr(torch, dtype)).eval()

    # Scorers: attention decoder + CTC prefix scorer + length bonus.
    # NOTE(review): `weights` carries an "lm" entry but no "lm" scorer is ever
    # added (scores["ngram"] is None too), so the language model never
    # contributes — wire in build_model_lm(args_dict_lm) if LM fusion is wanted.
    decoder = model_asr.decoder
    ctc = CTCPrefixScorer(ctc=model_asr.ctc, eos=model_asr.eos)
    token_list = model_asr.token_list
    scores.update(decoder=decoder, ctc=ctc, length_bonus=LengthBonus(len(token_list)))
    scores["ngram"] = None

    weights = dict(decoder=1.0 - ctc_weight, ctc=ctc_weight, lm=lm_weight, ngram=ngram_weight, length_bonus=penalty)
    beam_search = BeamSearch(beam_size=beam_size, weights=weights, scorers=scores, sos=model_asr.sos, eos=model_asr.eos, \
                            vocab_size=len(token_list), token_list = token_list, pre_beam_score_key=None if ctc_weight == 1.0 else "full")

    # Prefer the batched beam-search implementation when every full scorer
    # supports the batch interface.
    if batch_size == 1:
        non_batch = [k for k, v in beam_search.full_scorers.items() if not isinstance(v, BatchScorerInterface)]

        if len(non_batch) == 0:
            if streaming:
                beam_search.__class__ = BatchBeamSearchOnlineSim
                beam_search.set_streaming_config(args_dict_asr)
                logging.info(
                    "BatchBeamSearchOnlineSim implementation is selected."
                )
            else:
                beam_search.__class__ = BatchBeamSearch
                logging.info("###### BatchBeamSearch implementation is selected.")
        else:
            logging.warning(
                f"As non-batch scorers {non_batch} are found, "
                f"fall back to non-batch implementation."
            )

    beam_search.to(device=device, dtype=getattr(torch, dtype)).eval()
    for scorer in scores.values():
        if isinstance(scorer, torch.nn.Module):
            scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
    logging.info(f"Beam_search: {beam_search}")
    logging.info(f"Decoding device={device}, dtype={dtype}")

    ###########################################################
    ###########################################################

    # token_type: char
    tokenizer = build_tokenizer(token_type=args_dict_asr.token_type)
    converter = TokenIDConverter(token_list=args_dict_asr.token_list)

    # Reuse the shared preprocessing helper instead of duplicating its body
    # inline (the original repeated the same tensor/lengths construction here).
    batch = preprocess_audio(audio_ndarray)
    print(" ###### speech, lengths: ", batch["speech"].shape, batch["speech_lengths"].shape)

    batch = to_device(batch, device=device)
    print(batch)

    # Forward the encoder once; the beam search decodes from its output.
    enc, _ = model_asr.encode(**batch)

    nbest_hyps = beam_search(x=enc[0], maxlenratio=0.0, minlenratio=0.0)

    results = []
    for hyp in nbest_hyps:
        # remove sos/eos and get results
        last_pos = -1
        if isinstance(hyp.yseq, list):
            token_int = hyp.yseq[1:last_pos]
        else:
            token_int = hyp.yseq[1:last_pos].tolist()

        # remove blank symbol id, which is assumed to be 0
        token_int = list(filter(lambda x: x != 0, token_int))

        # Change integer-ids to tokens
        token = converter.ids2tokens(token_int)

        if tokenizer is not None:
            text = tokenizer.tokens2text(token)
        else:
            text = None
        results.append((text, token, token_int, hyp))

    print(results[0][0])
    return results


# Script entry: read one test utterance and decode it end-to-end.
# NOTE(review): this executes on import; consider wrapping in
# `if __name__ == "__main__":` so the module can be imported without decoding.
wav_path = "/home/gyf/pkg/xxgg/github/ai_app/cmcc/asr_conformer/speech_recognition/wav/BAC009S0764W0121.wav"
# soundfile.read returns (samples, sample_rate); rate is unused downstream.
audio, rate = soundfile.read(wav_path)

build_model_from_file(audio, get_args(config_file), model_file, get_args(config_file_lm), model_file_lm)


