import torch

from wenet.models.moe_comformer.moe_comformer_FSQ import init_moe_comformer_with_FSQ

if __name__ == '__main__':
    # Smoke test: load a MoE-Conformer FSQ encoder checkpoint and run its
    # tokenizer twice — once on random fake features, once on fbank features
    # extracted from a real wav — printing token indices and sequence lengths.
    from gxl_ai_utils.utils import utils_file

    # NOTE(review): 'now' is unused — leftover timing scaffolding.
    now = utils_file.do_get_now_time_by_second()
    test_config_path = "examples/tencent_data/s0_fsq/conf/train_2025_9_10_xlgeng.yaml"
    configs = utils_file.load_dict_from_yaml(test_config_path)
    encoder = init_moe_comformer_with_FSQ(configs)
    print(encoder)

    # Fall back to CPU when no GPU is present so the script still runs.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Fake batch: 2 utterances, 100 frames, 80-dim features; second is shorter.
    fake_feats = torch.randn(2, 100, 80).to(device)
    fake_lens = torch.tensor([100, 93]).to(device)
    encoder.to(device)

    ckpt_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage2/step_17999_only_encoder.pt"
    encoder.load_state_dict(torch.load(ckpt_path, map_location=torch.device('cpu')))
    # Inference only: disable dropout/batch-norm training behavior and
    # gradient tracking — otherwise the sanity check runs in train mode.
    encoder.eval()
    with torch.no_grad():
        index, seq_len = encoder.tokenizer(fake_feats, fake_lens)
    print(index)
    print(seq_len)

    import torchaudio
    import torchaudio.compliance.kaldi as kaldi

    # This test file's sample rate is 16 kHz.
    waveform, sr = torchaudio.load("test/resources/aishell-BAC009S0724W0121.wav")
    print(waveform.shape)
    print(sr)
    if sr != 16000:
        # Resample to the 16 kHz rate the fbank front-end expects.
        waveform = torchaudio.transforms.Resample(sr, 16000)(waveform)
        sr = 16000  # update before printing so the printed rate is correct
        print(waveform.shape)
        print(sr)
    if waveform.shape[0] != 1:
        # Keep only the first channel; downstream expects mono input.
        waveform = waveform[0, :].unsqueeze(0)

    # Kaldi-style fbank expects 16-bit integer-scaled samples.
    waveform = waveform * (1 << 15)
    mat = kaldi.fbank(waveform,
                      num_mel_bins=80,
                      frame_length=25,
                      frame_shift=10,
                      dither=1.0,
                      energy_floor=0.0,
                      sample_frequency=sr,
                      window_type="povey")
    feats = mat.unsqueeze(0).to(device)  # add batch dim: (1, T, 80)
    print(feats.shape)
    feats_length = torch.tensor([feats.shape[1]], device=device)
    print(feats_length)
    with torch.no_grad():
        index, seq_len = encoder.tokenizer(feats, feats_length)
    print(index)
    print(seq_len)
