# Standard library
import argparse
import csv
import json
import logging
import os
import sys
from pathlib import Path
from typing import List, Dict, Tuple
from typing import Optional

# Third-party
import numpy
import yaml
from typeguard import check_argument_types

# Local
from audio_preprocess import frontend_main
from net.batch_beam_search import BatchBeamSearch
from net.beam_search import BeamSearch
from net.build_tokenizer import build_tokenizer
from net.config import Config
from net.ctc_prefix_scorer import CTCPrefixScorer
from net.decoder import TransformerDecoder
from net.encoder import Encoder
from net.interface import BatchScorerInterface
from net.length_bonus import LengthBonus
from net.token_id_converter import TokenIDConverter
from net.transformer_lm import TransformerLM


def read_csv(file_dir=None, file_name="result.csv"):
    """Read a two-column CSV into a dict mapping first column -> second column.

    Args:
        file_dir: optional directory joined with file_name; when None,
            file_name is used as the full path.
        file_name: CSV file name (default "result.csv").

    Returns:
        dict: {row[0]: row[1]} for each data row; the header row is skipped.
    """
    if file_dir is not None:
        file_name = os.path.join(file_dir, file_name)
    csv_dict = {}
    # utf-8-sig transparently strips a BOM (e.g. files written by Excel);
    # newline="" is required by the csv module for correct quoting/newlines.
    with open(file_name, "r", encoding="utf-8-sig", newline="") as csv_file:
        reader = csv.reader(csv_file)
        next(reader, None)  # skip header; default avoids StopIteration on an empty file
        for row in reader:
            if len(row) >= 2:  # guard against blank/short rows (was an IndexError)
                csv_dict[row[0]] = row[1]
    return csv_dict

# logging.basicConfig(level=logging.DEBUG)

#################

def get_config(path):
    """Load a JSON or YAML configuration file and wrap it in a Config.

    Args:
        path: path to a ``.json`` / ``.yaml`` / ``.yml`` file
            (extension matched case-insensitively).

    Returns:
        Config built from the parsed dictionary.

    Raises:
        ValueError: if the file extension is not a supported format.
    """
    _, ext = os.path.splitext(path)
    ext = ext.lower()  # accept .JSON / .YAML / .YML as well
    if ext == '.json':
        with open(path, 'r', encoding='utf-8') as f:
            dic = json.load(f)
    elif ext in ('.yaml', '.yml'):
        # safe_load: never execute arbitrary YAML tags from config files
        with open(path, 'r', encoding='utf-8') as f:
            dic = yaml.safe_load(f)
    else:
        raise ValueError('Configuration format is not supported.')
    return Config(dic)

# ################

def build_beam_search(args_cfg, scorers, weights):
    """Create a BeamSearch and upgrade it to BatchBeamSearch when every
    full scorer supports batched scoring.

    Args:
        args_cfg: Config with ``beam_search`` and ``token`` sections.
        scorers: mapping of scorer name -> scorer instance.
        weights: mapping of scorer name -> interpolation weight.

    Returns:
        The (possibly batch-upgraded) beam search object.
    """
    beam_search = BeamSearch(
        args_cfg.beam_search,
        args_cfg.token,
        scorers=scorers,
        weights=weights,
    )

    # Batched decoding is only valid when all full scorers implement the
    # batch interface; collect the ones that do not.
    non_batch = [
        name
        for name, scorer in beam_search.full_scorers.items()
        if not isinstance(scorer, BatchScorerInterface)
    ]
    if not non_batch:
        # Swap the class in place so all construction-time state is retained.
        beam_search.__class__ = BatchBeamSearch
        logging.info("BatchBeamSearch implementation is selected.")
    else:
        logging.warning(
            f"As non-batch scorers {non_batch} are found, "
            f"fall back to non-batch implementation."
        )

    return beam_search


def init_beam_search(args_cfg, providers: Optional[List[str]] = None, use_quant=False):
    """Assemble the beam-search decoder, tokenizer and token-id converter.

    Args:
        args_cfg: Config object with decoder/ctc/lm/token/weights/tokenizer
            sections (see the accompanying YAML config).
        providers: ONNX Runtime execution providers; defaults to
            ``['CUDAExecutionProvider']`` when None.
        use_quant: load quantized model variants when True.

    Returns:
        Tuple ``(beam_search, tokenizer, converter)``; ``tokenizer`` is None
        unless the configured token_type is "char".
    """
    # A mutable list default would be shared across calls (classic Python
    # pitfall); use a None sentinel and materialize the default per call.
    if providers is None:
        providers = ['CUDAExecutionProvider']

    # Attention decoder scorer (ONNX model).
    decoder = TransformerDecoder(args_cfg.decoder, providers=providers, use_quantized=use_quant)
    scorers = {'decoder': decoder}
    weights = {}

    if not args_cfg.transducer.use_transducer_decoder:
        # Joint CTC/attention decoding: add CTC prefix scorer + length bonus.
        ctc = CTCPrefixScorer(args_cfg.ctc, args_cfg.token.eos, providers, use_quant)
        scorers.update(ctc=ctc, length_bonus=LengthBonus(len(args_cfg.token.list)))
        weights.update(
            decoder=args_cfg.weights.decoder,
            ctc=args_cfg.weights.ctc,
            length_bonus=args_cfg.weights.length_bonus,
        )

    # External language model scorer (ONNX model).
    lm = TransformerLM(args_cfg.lm, providers, use_quant)
    scorers.update(lm=lm)
    weights.update(lm=args_cfg.weights.lm)

    beam_search = build_beam_search(args_cfg, scorers, weights)

    tokenizer = None
    if args_cfg.tokenizer.token_type == "char":
        tokenizer = build_tokenizer(**args_cfg.tokenizer.dic)

    converter = TokenIDConverter(args_cfg.token.list)

    return beam_search, tokenizer, converter


##########################
def get_args(config_file):
    """Load a YAML config file and expose its top-level keys as attributes.

    Args:
        config_file: ``pathlib.Path`` to the YAML configuration file.

    Returns:
        ``argparse.Namespace`` whose attributes are the file's top-level keys.
    """
    with config_file.open("r", encoding="utf-8") as f:
        parsed = yaml.safe_load(f)
    return argparse.Namespace(**parsed)
##########################

# model_dir = "/public/ai_platform/models/cmcc_2022/conformer_2048"

# Directory holding the exported ONNX model files.
# NOTE(review): hard-coded absolute path — consider making this configurable
# (CLI argument or environment variable) before deployment.
model_dir = "/public/ai_platform/yfguo/conformer_onnx_modify/"

# Constructed at import time: loading this module loads the encoder model as
# a side effect. NOTE(review): consider lazy construction inside main().
encoder = Encoder(model_dir)

# config_file = "onnx_config.yaml"
# config_file = Path(config_file)
# args_cfg_dict = get_args(config_file)
# args_cfg = get_config(config_file)

# beam_search, tokenizer, converter = init_beam_search(args_cfg)


def inference_main(speech, lengths):
    """Run the acoustic encoder over one preprocessed utterance.

    Args:
        speech: feature array produced by ``frontend_main`` — assumed to be a
            batched float feature matrix; TODO confirm exact shape/dtype
            against ``audio_preprocess``.
        lengths: corresponding valid-frame lengths for ``speech``.

    Returns:
        Tuple ``(enc, enc_len)``: encoder output and output lengths as
        returned by the module-level ``encoder``. (Previously returned None;
        existing callers that ignore the return value are unaffected.)
    """
    # Removed debug artifacts: unconditional numpy.save("feats.npy", ...)
    # silently overwrote a file in the CWD on every call, and raw prints
    # flooded stdout. The dead commented-out attention-decoding path was
    # dropped; see init_beam_search() if full decoding is reinstated.
    enc, enc_len = encoder(speech, lengths)
    return enc, enc_len


if __name__ == '__main__':
    # Smoke test: encode a single utterance (Conformer without decoder).
    # The wav path may be overridden from the command line:
    #   python <this_script>.py /path/to/audio.wav
    default_wav = "/home/gyf/pkg/conformer_new/asr_conformer/speech_recognition/wav/BAC009S0764W0121.wav"
    wav_file = sys.argv[1] if len(sys.argv) > 1 else default_wav

    # Feature extraction, then encoder forward pass.
    speech, lengths = frontend_main(wav_file, model_dir)
    inference_main(speech, lengths)