import paddle
import soundfile

import warnings
warnings.filterwarnings('ignore')

from yacs.config import CfgNode
from paddlespeech.audio.transform.spectrogram import LogMelSpectrogramKaldi
from paddlespeech.audio.transform.cmvn    import GlobalCMVN
from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
from paddlespeech.s2t.models.u2 import U2Model
from paddlespeech.s2t.frontend.audio import AudioSegment
from io import BytesIO


def initialize(config_path, decoding_method, checkpoint_path,
               cmvn_path="data/mean_std.json"):
    """Build every component needed for offline speech recognition.

    Args:
        config_path (str): Path to the transformer YAML config file.
        decoding_method (str): Decoding strategy (e.g. "attention");
            overrides the value stored in the config file.
        checkpoint_path (str): Path to the trained ``.pdparams`` weights.
        cmvn_path (str): Path to the global mean/std statistics JSON
            (default keeps the previous hard-coded location).

    Returns:
        tuple: ``(logmel_kaldi, cmvn, model, decoding_config, text_feature)``
        ready to be passed to :func:`speech_recon`.
    """
    transformer_config = CfgNode(new_allowed=True)
    with open(config_path, "r", encoding="utf-8") as f:
        cfg = transformer_config.load_cfg(f)
    transformer_config.merge_from_other_cfg(cfg)
    transformer_config.decoding.decoding_method = decoding_method

    # Kaldi-style log-mel feature extractor: 16 kHz input, 80 mel bins,
    # 10 ms hop (160 samples) / 25 ms window (400 samples), with dithering.
    logmel_kaldi = LogMelSpectrogramKaldi(
        fs=16000,
        n_mels=80,
        n_shift=160,
        win_length=400,
        dither=True)

    # Global CMVN: subtract the global mean and divide by the std deviation.
    cmvn = GlobalCMVN(cmvn_path=cmvn_path)

    model_conf = transformer_config.model
    # input_dim is the feature dimension; must match n_mels above.
    model_conf.input_dim = 80
    # output_dim is the vocabulary size — presumably matches the vocab file
    # referenced by the config; TODO confirm against collator.vocab_filepath.
    model_conf.output_dim = 4233
    model = U2Model.from_config(model_conf)
    model_dict = paddle.load(checkpoint_path)
    model.set_state_dict(model_dict)

    decoding_config = transformer_config.decoding
    text_feature = TextFeaturizer(
        unit_type='char',
        vocab=transformer_config.collator.vocab_filepath)

    return logmel_kaldi, cmvn, model, decoding_config, text_feature

def speech_recon(audio, logmel_kaldi, cmvn, model, decoding_config, text_feature):
    """Recognize a single utterance and return the decoded transcript(s).

    Args:
        audio: 1-D waveform samples (int16, 16 kHz, as read by soundfile).
        logmel_kaldi: feature extractor built by :func:`initialize`.
        cmvn: global mean/variance normalizer.
        model: loaded U2 model.
        decoding_config: decoding section of the transformer config.
        text_feature: char-level text featurizer.

    Returns:
        The result of ``model.decode`` (decoded transcripts).
    """
    # (T, n_mels) log-mel features, then global mean/variance normalization.
    feats = logmel_kaldi(audio, train=False)
    feats = cmvn(feats)

    # BUG FIX: the utterance length is the number of frames, feats.shape[0].
    # The previous code used feats[:, 0] — the first mel coefficient of every
    # frame — which is not a length at all.
    audio_len = paddle.to_tensor(feats.shape[0])
    audio_feature = paddle.to_tensor(feats, dtype='float32')
    # BUG FIX: add the batch axis in front, (T, D) -> (1, T, D).
    # The previous axis=1 produced (T, 1, D), which does not match the
    # batch-first layout expected by U2Model.decode.
    audio_feature = paddle.unsqueeze(audio_feature, axis=0)

    result_transcripts = model.decode(
        audio_feature,
        audio_len,
        text_feature=text_feature,
        decoding_method=decoding_config.decoding_method,
        beam_size=decoding_config.beam_size,
        ctc_weight=decoding_config.ctc_weight,
        decoding_chunk_size=decoding_config.decoding_chunk_size,
        num_decoding_left_chunks=decoding_config.num_decoding_left_chunks,
        simulate_streaming=decoding_config.simulate_streaming)
    return result_transcripts

if __name__ == "__main__":
    # Paths and decoding strategy for this demo run.
    conf_file = "conf/transformer.yaml"
    weights_file = "./exp/transformer/checkpoints/avg_20.pdparams"
    method = "attention"

    # Build the feature pipeline, model and decoder configuration once.
    components = initialize(conf_file, method, weights_file)

    # Load the utterance as raw 16-bit PCM samples (sample rate is ignored).
    samples, _ = soundfile.read("./input.wav", dtype="int16")

    transcript = speech_recon(samples, *components)
    print(transcript)
