import onnxruntime as ort
import numpy as np
import torch

# Exported encoder wrapper; run it on CPU so results are hardware-independent.
save_path = "/apdcephfs_qy3/share_976139/users/xuelonggeng/code/wenet_tencent/examples/tencent_data/s0_fsq/exp/onnx/encoder_wrapper.onnx"
providers = ["CPUExecutionProvider"]
# NOTE(review): `sess` is never used below in this file — presumably kept for a
# follow-up ONNX-inference comparison; confirm before deleting.
sess = ort.InferenceSession(save_path, providers=providers)

# Example input dimensions (currently unused below — the real features are
# computed from the wav file instead of random data).
B = 2    # batch size
T = 160  # number of frames
F = 80   # feature (mel-bin) dimension
# speech = torch.randn(B, T, F)  # shape: [B, T, F], dtype: float32
# lengths = torch.tensor([T, T - 14])  # shape: [B], dtype: int64

import torchaudio.compliance.kaldi as kaldi
import torchaudio

# Load the test utterance (sample rate expected to be 16 kHz).
waveform, sr = torchaudio.load("/apdcephfs_qy3/share_976139/users/xuelonggeng/code/wenet_tencent/test/resources/aishell-BAC009S0724W0121.wav")
print(waveform.shape)
print(sr)
if sr != 16000:
    # Resample to the 16 kHz rate the model expects.
    waveform = torchaudio.transforms.Resample(sr, 16000)(waveform)
    sr = 16000  # update BEFORE printing so the log shows the effective rate
    print(waveform.shape)
    print(sr)
if waveform.shape[0] != 1:
    # Multi-channel audio: keep only the first channel, shape stays [1, samples].
    waveform = waveform[0, :].unsqueeze(0)

# Scale to 16-bit-PCM range, which kaldi-compatible fbank expects.
waveform = waveform * (1 << 15)
# Only keep key, feat, label
# NOTE(review): dither=1.0 injects random noise, so features are not
# reproducible across runs — consider dither=0.0 for exact ONNX comparisons.
mat = kaldi.fbank(waveform,
                  num_mel_bins=80,
                  frame_length=25,
                  frame_shift=10,
                  dither=1.0,
                  energy_floor=0.0,
                  sample_frequency=sr,
                  window_type="povey")
# Fall back to CPU when no GPU is present (was hard-coded to CUDA, which
# crashes on CPU-only machines; identical behavior on GPU machines).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
speech = mat.unsqueeze(0).to(device)  # [1, T, 80] fbank features
print(speech.shape)
lengths = torch.tensor([speech.shape[1]]).to(device)  # [B], frame counts
print(lengths)

# =========================================================================
#  Compare against the native PyTorch model — comparison succeeded, token
#  values matched exactly.
from gxl_ai_utils.utils import utils_file
from wenet.models.moe_comformer.moe_comformer_FSQ import init_moe_comformer_with_FSQ


def _print_tensor_info(title, t):
    # Dump a tensor's content, shape and dtype under a section header.
    print(title)
    print(f"内容: \n{t}")
    print(f"形状: {t.shape}")
    print(f"数据类型: {t.dtype}")


test_config_path = "/apdcephfs_qy3/share_976139/users/xuelonggeng/code/wenet_tencent/examples/tencent_data/s0_fsq/conf/train_u2++_moe_conformer2.yaml"
configs = utils_file.load_dict_from_yaml(test_config_path)
encoder = init_moe_comformer_with_FSQ(configs)
ckpt_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage3_causal/step_19499_only_encoder.pt"
# NOTE(review): torch.load deserializes with pickle — fine for this trusted
# local checkpoint, but never point it at untrusted files.
encoder.load_state_dict(torch.load(ckpt_path, map_location=torch.device('cpu')))
# Fall back to CPU when no GPU is present (was hard-coded to "cuda").
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
encoder.to(device)
encoder.eval()
speech = speech.to(device)
lengths = lengths.to(device)
with torch.no_grad():  # inference only — skip autograd bookkeeping
    indices, seq_lengths = encoder.tokenizer(speech, lengths)
_print_tensor_info("===== indices =====", indices)
_print_tensor_info("\n===== seq_lengths =====", seq_lengths)

# Re-run on a 100-frame prefix to confirm the tokenizer handles shorter input.
speech = speech[:, :100, :]
lengths = torch.tensor([100], device=device)  # shape: [B], dtype: int64
with torch.no_grad():
    indices, seq_lengths = encoder.tokenizer(speech, lengths)
_print_tensor_info("===== indices =====", indices)
_print_tensor_info("\n===== seq_lengths =====", seq_lengths)
