import onnxruntime as ort
import numpy as np
import torch

# Path to the exported causal encoder ONNX graph.
# NOTE(review): machine-specific absolute path — parameterize before sharing.
save_path = "/apdcephfs_qy3/share_976139/users/xuelonggeng/code/wenet_tencent/examples/tencent_data/s0_fsq/exp/onnx/encoder_wrapper_causal_step28499.onnx"
# CPU-only inference session; the graph exposes outputs "indices" and
# "seq_lengths" (see the run() calls below).
sess = ort.InferenceSession(save_path, providers=["CPUExecutionProvider"])

# Build a random input batch.
B, T, F = 4, 160, 80
speech = torch.randn(B, T, F)  # shape: [B, T, F], dtype: float32
lengths = torch.tensor([T, T, T - 21, T - 14])  # shape: [B], dtype: int64


def run_onnx_and_print(session, speech_batch, length_batch):
    """Run the exported encoder on one batch and print both outputs.

    Args:
        session: an onnxruntime InferenceSession whose graph exposes the
            outputs "indices" and "seq_lengths" and the inputs "speech"
            and "speech_lengths".
        speech_batch: torch.FloatTensor of shape [B, T, F].
        length_batch: torch.LongTensor of shape [B].

    Returns:
        (indices, seq_lengths) as numpy arrays, exactly as returned by
        the session.

    ONNX Runtime consumes numpy arrays, so the torch tensors are
    converted right before the call.
    """
    outputs = session.run(
        ["indices", "seq_lengths"],
        {"speech": speech_batch.numpy(), "speech_lengths": length_batch.numpy()},
    )
    indices, seq_lengths = outputs
    # Dump the token index sequence.
    print("===== indices =====")
    print(f"内容: \n{indices}")
    print(f"形状: {indices.shape}")
    print(f"数据类型: {indices.dtype}")
    # Dump the per-utterance output lengths.
    print("\n===== seq_lengths =====")
    print(f"内容: \n{seq_lengths}")
    print(f"形状: {seq_lengths.shape}")
    print(f"数据类型: {seq_lengths.dtype}")
    return indices, seq_lengths


# First pass: full 160-frame batch with ragged lengths.
indices, seq_lengths = run_onnx_and_print(sess, speech, lengths)

# Second pass: truncate the same batch to a 100-frame prefix to check
# that the exported graph handles a different T correctly.
speech = speech[:, :100, :]
lengths = torch.tensor([100, 100, 100, 100])
indices, seq_lengths = run_onnx_and_print(sess, speech, lengths)



# =========================================================================
from wenet.utils.mask import make_pad_mask
class Wrapper_export_onnx(torch.nn.Module):
    """Thin export wrapper: run the FSQ encoder and expose only the
    quantizer token indices plus the valid output lengths.

    The forward signature mirrors the ONNX graph inputs
    (speech, speech_lengths) -> (indices, seq_lengths).
    """

    def __init__(self, encoder, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Underlying FSQ conformer encoder (project type).
        self.encoder = encoder

    def forward(self, speech, speech_lengths):
        # Boolean (B, 1, T) mask, True on valid frames, fed to the
        # subsampling/embedding front-end.
        embed_mask = (~make_pad_mask(speech_lengths, speech.size(1))).unsqueeze(1)
        xs, pos_emb, masks = self.encoder.embed(speech, embed_mask)
        # Full-context chunking (chunk size 1, num_left_chunks -1).
        chunk_masks = self.encoder.chunk_masker(masks, 1, -1)
        # Run every shared encoder layer in sequence.
        for layer in self.encoder.shared_encoder:
            xs = layer(xs, pos_emb, masks, chunk_masks, None)
        # FSQ quantization; only the discrete indices are exported.
        quantize, indices = self.encoder.fsq(xs)
        # Valid length per utterance after subsampling.
        seq_lengths = masks.squeeze(1).sum(1)
        return indices, seq_lengths

# =========================================================================
# Cross-check against the pure-PyTorch forward pass; on success the token
# values match the ONNX outputs exactly.
from gxl_ai_utils.utils import utils_file
from wenet.models.moe_comformer.moe_comformer_FSQ import init_moe_comformer_with_FSQ

test_config_path = "/apdcephfs_qy3/share_976139/users/xuelonggeng/code/wenet_tencent/examples/tencent_data/s0_fsq/conf/train_u2++_moe_conformer2.yaml"
configs = utils_file.load_dict_from_yaml(test_config_path)
encoder = init_moe_comformer_with_FSQ(configs)
ckpt_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage3_causal/step_28499_only_encoder.pt"
# NOTE(review): torch.load unpickles arbitrary objects — only ever load
# trusted checkpoints (consider weights_only=True on recent torch).
encoder.load_state_dict(torch.load(ckpt_path, map_location=torch.device('cpu')))
wrapper = Wrapper_export_onnx(encoder)
wrapper.eval()
wrapper.to("cuda")
speech = speech.to("cuda")
lengths = lengths.to("cuda")
# Bug fix: the result was previously bound to a misspelled name
# ("indeces"), so the prints below showed the stale ONNX outputs instead
# of the PyTorch ones — the comparison never actually compared anything.
# Inference only, so skip autograd bookkeeping.
with torch.no_grad():
    indices, seq_lengths = wrapper(speech, lengths)

# Print the PyTorch token indices.
print("===== indices =====")
print(f"内容: \n{indices}")
print(f"形状: {indices.shape}")
print(f"数据类型: {indices.dtype}")

# Print the PyTorch sequence lengths.
print("\n===== seq_lengths =====")
print(f"内容: \n{seq_lengths}")
print(f"形状: {seq_lengths.shape}")
print(f"数据类型: {seq_lengths.dtype}")

