
from typing import Union
from typeguard import check_argument_types
from pathlib import Path

import os
import sys
import glob
from datetime import datetime
import shutil
import logging
import numpy as np
import torch
from onnxruntime.quantization import quantize_dynamic

from espnet2.bin.asr_inference import Speech2Text
from espnet2.text.sentencepiece_tokenizer import SentencepiecesTokenizer


from get_config import get_ngram_config, get_beam_config, get_token_config, get_tokenizer_config, get_weights_transducer, get_trans_beam_config
from config import save_config, update_model_path

from models.encoders.encoder import ConformerEncoder
from models.decoders.decoder import TransformerDecoder
from models.ctc import CTC
from models.language_models.transformer import TransformerLM

class SpeechRecognizer():
    """Minimal wrapper around a local ESPnet model directory.

    Currently this only discovers the files stored in ``model_dir``;
    loading/parsing of the discovered config is not implemented yet.
    """

    def __init__(self, model_dir) -> None:
        """
        Args:
            model_dir: Directory holding the model/config files
                (existence is not validated here).
        """
        self.model_dir = model_dir

    def _load_config(self):
        """Print and return all files found directly under ``model_dir``.

        Returns:
            list[str]: Paths matched by ``<model_dir>/*.*``, sorted so the
            result is deterministic across platforms (``glob`` order is not).
        """
        config_files = sorted(glob.glob(os.path.join(self.model_dir, '*.*')))
        print(config_files)
        return config_files

# NOTE(review): module-level side effect — this runs on *import* and uses a
# hard-coded, machine-specific path; consider moving it under the
# `if __name__ == '__main__'` guard at the bottom of the file.
sr = SpeechRecognizer("/home/gyf/pkg/xxgg/github/ai_app/cmcc/docs/cmcc_2022/conformer_espnet")
sr._load_config()


class ModelExportOnnx:
    """Export an ESPnet ``Speech2Text`` model to ONNX.

    The encoder, decoder, CTC head and (optionally) the language model are
    each exported as a separate ``.onnx`` file under
    ``<model_dir>/<tag_name>/models``, together with a ``config.yaml`` that
    describes every exported component. Dynamic quantization of the exported
    models is optional.
    """

    def __init__(self, model_dir: Union[Path, str, None] = None):
        """
        Args:
            model_dir: Root output directory for all exports. Defaults to an
                ``onnx_models`` directory next to this source file.
        """
        assert check_argument_types()
        if model_dir is None:
            model_dir = os.path.join(os.path.dirname(__file__), "onnx_models")

        self.model_dir = model_dir
        # Extra options forwarded to each model wrapper; populated through
        # set_export_config().
        self.export_config = {}

    def export(self, model: 'Speech2Text', tag_name: Union[str, None] = None,
               quantize: bool = False, verbose: bool = False):
        """Export all sub-models of ``model`` and write ``config.yaml``.

        Args:
            model: Loaded ESPnet inference model.
            tag_name: Name for this export; defaults to a timestamp.
            quantize: Also produce dynamically quantized ONNX models.
            verbose: Enable verbose ONNX export and extra logging.
        """
        assert check_argument_types()
        if tag_name is None:
            tag_name = datetime.now().strftime("%Y%m%d_%H%M%S")

        base_dir = os.path.join(self.model_dir, tag_name.replace(' ', '_'))
        export_dir = os.path.join(base_dir, 'models')
        # Always start from a clean export directory.
        if os.path.exists(export_dir):
            shutil.rmtree(export_dir)
        os.makedirs(export_dir)

        # Copy auxiliary files (normalization stats, bpemodel, pos. encoding).
        self._copy_files(model, base_dir, verbose)
        model_config = self._create_config(model, export_dir)

        # Encoder.
        enc_model = ConformerEncoder(model.asr_model.encoder, self.export_config)
        enc_out_size = enc_model.get_output_size()
        self._export_encoder(enc_model, export_dir, verbose)
        model_config.update(encoder=enc_model.get_model_config(model.asr_model, export_dir))

        # Decoder.
        dec_model = TransformerDecoder(model.asr_model.decoder, self.export_config)
        self._export_decoder(dec_model, enc_out_size, export_dir, verbose)
        model_config.update(decoder=dec_model.get_model_config(export_dir))

        # CTC head.
        ctc_model = CTC(model.asr_model.ctc.ctc_lo)
        self._export_ctc(ctc_model, enc_out_size, export_dir, verbose)
        model_config.update(ctc=ctc_model.get_model_config(export_dir))

        # Optional language model (only used by non-transducer beam search).
        lm_model = None
        if not model.asr_model.use_transducer_decoder \
                and 'lm' in model.beam_search.scorers:
            lm_model = TransformerLM(model.beam_search.scorers['lm'], self.export_config)

        if lm_model is not None:
            self._export_lm(lm_model, export_dir, verbose)
            model_config.update(lm=lm_model.get_model_config(export_dir))

        # Optional dynamic quantization of every exported model.
        if quantize:
            quant_dir = os.path.join(base_dir, 'quantization')
            # BUG FIX: the original removed `export_dir` here (deleting the
            # models exported just above) instead of clearing the stale
            # quantization directory.
            if os.path.exists(quant_dir):
                shutil.rmtree(quant_dir)
            os.makedirs(quant_dir)
            quant_config = self._quantize_model(export_dir, quant_dir, verbose)
            for m in quant_config.keys():
                if 'predecoder' in m:
                    # Predecoder entries are stored per-index under 'decoder'.
                    model_idx = int(m.split('_')[1])
                    model_config['decoder']['predecoder'][model_idx].update(
                        quantized_model_path=quant_config[m])
                else:
                    model_config[m].update(quantized_model_path=quant_config[m])

        config_name = os.path.join(base_dir, 'config.yaml')
        save_config(model_config, config_name)
        update_model_path(tag_name, base_dir)

    def export_from_pretrained(self, tag_name: str, quantize: bool = False):
        """Download/load a pretrained model by tag and export it to ONNX."""
        assert check_argument_types()
        model = Speech2Text.from_pretrained(tag_name)
        self.export(model, tag_name, quantize)

    def export_from_zip(self, path: Union[Path, str], tag_name: str, quantize: bool = False):
        """Load a packed ESPnet model (zip archive) and export it to ONNX."""
        assert check_argument_types()
        model = Speech2Text.from_pretrained(path)
        self.export(model, tag_name, quantize)

    def set_export_config(self, **kwargs):
        """Merge keyword options into the export configuration dict."""
        for k, v in kwargs.items():
            self.export_config[k] = v

    def _create_config(self, model, path):
        """Build the base (non-model) section of ``config.yaml``.

        Covers beam-search weights, ngram/LM usage, transducer flag, token
        list and tokenizer settings.
        """
        ret = {}
        if not model.asr_model.use_transducer_decoder:
            scorer_names = list(model.beam_search.full_scorers.keys()) \
                + list(model.beam_search.part_scorers.keys())
            if "ngram" in scorer_names:
                ret.update(ngram=get_ngram_config(model))
            else:
                ret.update(ngram=dict(use_ngram=False))
            ret.update(weights=model.beam_search.weights)
            ret.update(beam_search=get_beam_config(
                model.beam_search, model.minlenratio, model.maxlenratio))
        else:
            ret.update(weights=get_weights_transducer(
                model.beam_search_transducer))
            ret.update(beam_search=get_trans_beam_config(
                model.beam_search_transducer
            ))

        ret.update(transducer=dict(use_transducer_decoder=model.asr_model.use_transducer_decoder))
        ret.update(token=get_token_config(model.asr_model))
        ret.update(tokenizer=get_tokenizer_config(model.tokenizer, path))
        return ret

    def _export_model(self, model, verbose, path, enc_size=None):
        """Run ``torch.onnx.export`` for one wrapper model (and submodels).

        Args:
            model: Wrapper exposing ``get_dummy_inputs``/``get_input_names``/
                ``get_output_names``/``get_dynamic_axes``/``model_name``.
            enc_size: Encoder output size, required by decoder/CTC dummies.
        """
        if enc_size:
            dummy_input = model.get_dummy_inputs(enc_size)
        else:
            dummy_input = model.get_dummy_inputs()

        torch.onnx.export(
            model,
            dummy_input,
            os.path.join(path, f'{model.model_name}.onnx'),
            verbose=verbose,
            opset_version=15,
            input_names=model.get_input_names(),
            output_names=model.get_output_names(),
            dynamic_axes=model.get_dynamic_axes()
        )

        # Export submodels (e.g. predecoders) that require their own graph.
        if hasattr(model, 'submodel'):
            for i, sm in enumerate(model.submodel):
                if sm.require_onnx():
                    self._export_model(sm, verbose, path, enc_size)

    def _export_encoder(self, model, path, verbose):
        if verbose:
            # BUG FIX: `file_name` was undefined here (NameError when verbose).
            logging.info(f'Encoder model is saved in {os.path.join(path, f"{model.model_name}.onnx")}')
        self._export_model(model, verbose, path)

    def _export_decoder(self, model, enc_size, path, verbose):
        if verbose:
            # BUG FIX: `file_name` was undefined here (NameError when verbose).
            logging.info(f'Decoder model is saved in {os.path.join(path, f"{model.model_name}.onnx")}')
        self._export_model(model, verbose, path, enc_size)

    def _export_ctc(self, model, enc_size, path, verbose):
        if verbose:
            # BUG FIX: `file_name` was undefined here (NameError when verbose).
            logging.info(f'CTC model is saved in {os.path.join(path, f"{model.model_name}.onnx")}')
        self._export_model(model, verbose, path, enc_size)

    def _export_lm(self, model, path, verbose):
        if verbose:
            # BUG FIX: `file_name` was undefined here (NameError when verbose).
            logging.info(f'LM model is saved in {os.path.join(path, f"{model.model_name}.onnx")}')
        self._export_model(model, verbose, path)

    def _export_joint_network(self, model, path, verbose):
        if verbose:
            # BUG FIX: `file_name` was undefined here (NameError when verbose).
            logging.info(f'JointNetwork model is saved in {os.path.join(path, f"{model.model_name}.onnx")}')
        self._export_model(model, verbose, path)

    def _copy_files(self, model, path, verbose):
        """Copy model-adjacent artifacts (stats, bpemodel, pos. encoding)."""
        # Copy normalization stats file, if the model uses one.
        if model.asr_model.normalize is not None \
                and hasattr(model.asr_model.normalize, 'stats_file'):
            stats_file = model.asr_model.normalize.stats_file
            shutil.copy(stats_file, path)
            if verbose:
                logging.info(f'`stats_file` was copied into {path}.')

        # Copy the SentencePiece model when that tokenizer is in use.
        if isinstance(model.tokenizer, SentencepiecesTokenizer):
            bpemodel_file = model.tokenizer.model
            shutil.copy(bpemodel_file, path)
            if verbose:
                logging.info(f'bpemodel was copied into {path}.')

        # Save positional-encoding matrix as `pe.npy`.
        if hasattr(model.asr_model.encoder, 'pos_enc'):
            # BUG FIX: `path` is a str here, so the original `path / 'pe'`
            # raised TypeError; join explicitly instead.
            np.save(
                os.path.join(path, 'pe'),
                model.asr_model.encoder.pos_enc.pe.numpy()
            )
            if verbose:
                logging.info(f'Matrix for position encoding was copied into {path}.')

    def _quantize_model(self, model_from, model_to, verbose):
        """Dynamically quantize every ``.onnx`` file in ``model_from``.

        Returns:
            dict: model basename -> path of the quantized ``*_qt.onnx`` file.
        """
        if verbose:
            logging.info(f'Quantized model is saved in {model_to}.')
        ret = {}
        models = glob.glob(os.path.join(model_from, "*.onnx"))
        for m in models:
            basename = os.path.basename(m).split('.')[0]
            export_file = os.path.join(model_to, basename + '_qt.onnx')
            quantize_dynamic(
                m,
                export_file,
                op_types_to_quantize=['Attention', 'MatMul']
            )
            ret[basename] = export_file
            # Newer onnxruntime versions do not emit the `-opt.onnx`
            # intermediate; only remove it when it actually exists.
            opt_file = os.path.join(model_from, basename + '-opt.onnx')
            if os.path.exists(opt_file):
                os.remove(opt_file)
        return ret


if __name__ == '__main__':
    def convert_espnet_onnx(model_zip=None, tag_name=None):
        """Convert a packed ESPnet model archive into ONNX models.

        BUG FIX: the original overwrote both arguments with hard-coded paths,
        making the parameters dead; the hard-coded values now serve only as
        defaults, so callers can supply their own paths.

        Args:
            model_zip: Path to the ESPnet ``.zip`` model archive.
            tag_name: Output tag (directory) for the exported ONNX models.
        """
        if model_zip is None:
            model_zip = "/home/gyf/pkg/xxgg/github/ai_app/cmcc/docs/cmcc_2022/conformer_espnet/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp_valid.acc.ave.zip"
        if tag_name is None:
            tag_name = "/home/gyf/pkg/xxgg/github/ai_app/cmcc/asr_conformer/onnx/onnx_models/conformer/models"
        onnx_export = ModelExportOnnx()
        # onnx_export.set_export_config(max_seq_len=2048)
        onnx_export.export_from_zip(model_zip, tag_name=tag_name, quantize=False)

    convert_espnet_onnx()