#  First, load the model.
import os
import warnings
from typing import Optional

import onnx
import torch

from wenet.models.moe_comformer.mask import make_pad_mask
from wenet.models.moe_comformer.moe_comformer_FSQ import init_moe_comformer_with_FSQ

class Wrapper_export_onnx(torch.nn.Module):
    """ONNX-export wrapper around the FSQ encoder.

    The wrapped encoder's forward returns richer structures; for export we
    only need two ONNX-friendly tensors: the FSQ code indices and the valid
    sequence length of each utterance.
    """

    def __init__(self, encoder, *args, **kwargs):
        """
        Args:
            encoder: encoder module exposing ``embed``, ``chunk_masker``,
                ``shared_encoder`` (iterable of layers) and ``fsq``
                sub-modules, as used in ``forward`` below.
        """
        super().__init__(*args, **kwargs)
        self.encoder = encoder

    def forward(self, speech, speech_lengths):
        """Encode features and return FSQ indices plus valid lengths.

        Args:
            speech: (B, T, F) float feature tensor.
            speech_lengths: (B,) int tensor of valid frame counts.

        Returns:
            indices: FSQ code indices per (subsampled) frame.
            seq_lengths: (B,) valid lengths after the embed subsampling.
        """
        # Padding mask before subsampling: True on valid frames, (B, 1, T).
        masks_for_embed = ~make_pad_mask(speech_lengths, speech.size(1)).unsqueeze(1)
        # embed() subsamples the input and returns the matching shorter mask.
        xs, embeddings, masks = self.encoder.embed(speech, masks_for_embed)
        # NOTE(review): args (1, -1) presumably mean decoding_chunk_size=1,
        # num_decoding_left_chunks=-1 — confirm against chunk_masker's signature.
        chunk_masks = self.encoder.chunk_masker(masks, 1, -1)
        for layer in self.encoder.shared_encoder:
            xs = layer(xs, embeddings, masks, chunk_masks, None)
        # fsq() returns (quantized, indices); only the indices are exported.
        _, indices = self.encoder.fsq(xs)
        seq_lengths = masks.squeeze(1).sum(1)
        return indices, seq_lengths

def export_model(model, input_tensors, input_names, output_names, dynamic_axes,
                 onnx_name_prefix, model_dir_path, opset_version,
                 metadata: Optional[dict] = None):
    """Export *model* to ONNX and optionally attach metadata key/value pairs.

    Args:
        model: ``torch.nn.Module`` to export.
        input_tensors: tuple of example inputs used to trace the graph.
        input_names: ordered names for the graph inputs.
        output_names: ordered names for the graph outputs.
        dynamic_axes: mapping tensor name -> {axis index: symbolic dim name}.
        onnx_name_prefix: file name (without extension) of the saved model.
        model_dir_path: directory the ``.onnx`` file is written to.
        opset_version: ONNX opset version to export with.
        metadata: optional str->str pairs stored in the model's
            ``metadata_props``.

    Returns:
        The path of the saved ``.onnx`` file.
    """
    save_path = os.path.join(model_dir_path, f'{onnx_name_prefix}.onnx')
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    with warnings.catch_warnings():
        # torch.onnx.export emits many tracer warnings; silence them locally.
        warnings.simplefilter("ignore")
        torch.onnx.export(model, input_tensors, save_path,
                          input_names=input_names,
                          output_names=output_names,
                          dynamic_axes=dynamic_axes,
                          do_constant_folding=True,
                          training=torch.onnx.TrainingMode.PRESERVE,
                          verbose=False,
                          opset_version=opset_version)
    if metadata:
        # Only reload/re-save when metadata must be attached; otherwise the
        # file written by torch.onnx.export is already final.
        tmp_model = onnx.load(save_path)
        for key, value in metadata.items():
            tmp_model.metadata_props.append(
                onnx.StringStringEntryProto(key=key, value=value))
        onnx.save(tmp_model, save_path)
    print(f'Exported {save_path}')
    return save_path


if __name__ == '__main__':
    from gxl_ai_utils.utils import utils_file
    # Build the encoder from its training YAML config, then load weights.
    # test_config_path = "./conf/train_2025_9_10_xlgeng.yaml"
    test_config_path = "/apdcephfs_qy3/share_976139/users/xuelonggeng/code/wenet_tencent/examples/tencent_data/s0_fsq/conf/train_u2++_moe_conformer2.yaml"
    configs = utils_file.load_dict_from_yaml(test_config_path)
    encoder = init_moe_comformer_with_FSQ(configs)
    # Earlier checkpoints, kept for reference:
    # ckpt_path="/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage2/step_17999_only_encoder.pt"
    # ckpt_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage3_causal/step_19499_only_encoder.pt"
    ckpt_path="/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage3_causal/step_28499_only_encoder.pt"
    encoder.load_state_dict(torch.load(ckpt_path, map_location=torch.device('cpu')))

    # Export is traced on CPU in eval mode (no dropout / BN updates).
    wrapper = Wrapper_export_onnx(encoder).to("cpu")
    wrapper.eval()

    # Dummy inputs used only for tracing; real sizes are made dynamic below.
    B, T, F = 2, 160, 80
    speech = torch.randn(B, T, F)             # [B,T,F], float32
    lengths = torch.tensor([T, T - 14])       # [B], int64 (torch default)

    dynamic_axes = {
        "speech": {0: "batch", 1: "time_in"},
        "speech_lengths": {0: "batch"},
        "indices": {0: "batch", 1: "time_out"},   # if there is no subsampling, time_out may be renamed to time_in
        "seq_lengths": {0: "batch"},
    }
    # NOTE: plain tuples — a stray trailing comma after the closing paren
    # would wrap each of these in an extra tuple.
    input_names = ("speech", "speech_lengths")
    output_names = ("indices", "seq_lengths")

    save_path = export_model(
        model=wrapper,
        input_tensors=(speech, lengths),
        onnx_name_prefix="encoder_wrapper_causal_step28499",
        model_dir_path="./exp/onnx",
        input_names=input_names,
        output_names=output_names,
        dynamic_axes=dynamic_axes,          # pass the dynamic_axes defined above
        metadata={
            "author": "xuelonggeng",
            "git_commit": "none",
            "pytorch_version": torch.__version__,
            "note": "wrapper forward: (indices, seq_lengths)"
        },
        opset_version=14,
    )



