import os
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import onnx
import onnxruntime as ort
import numpy as np

# Force transformers to load models from local files only — no hub network
# access (model_name below is a local checkpoint path).
os.environ['TRANSFORMERS_OFFLINE'] = '1'

class ModelWrapper(torch.nn.Module):
    """Wrap a HF causal LM so ONNX tracing sees plain tensors in, logits out.

    torch.onnx.export handles a flat (tensor-in, tensor-out) signature much
    better than the model's structured output object, so this wrapper:
    - disables the KV cache (``use_cache=False``) to keep ``past_key_values``
      out of the traced graph, and
    - returns only the ``logits`` tensor.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, input_ids, attention_mask):
        # Build position ids explicitly from the attention mask: cumulative
        # count of attended tokens minus one. Padded positions (mask == 0)
        # are set to 1 so they remain a valid, harmless index.
        position_ids = attention_mask.long().cumsum(-1) - 1
        position_ids.masked_fill_(attention_mask == 0, 1)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            use_cache=False,
        )
        # Only the logits are returned to avoid complex output structures
        # in the exported graph.
        return outputs.logits

def export_model_to_onnx(model_name, output_path, opset_version=15):
    """Export a causal-LM checkpoint to ONNX with dynamic batch/seq axes.

    Args:
        model_name: Local path (or hub id) of the model; loaded with
            ``trust_remote_code=True`` since TeleChat ships custom code.
        output_path: Destination ``.onnx`` file path.
        opset_version: ONNX opset to target (default 15).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
    model.eval()
    # The wrapper forwards use_cache=False; keep the config consistent so no
    # past_key_values outputs leak into the traced graph.
    model.config.use_cache = False

    # Any short prompt works — it only supplies concrete tensors for tracing.
    text = "This is a sample input for ONNX export."
    inputs = tokenizer(text, return_tensors="pt")

    # Wrap the model so the exported graph takes plain tensors and returns
    # only the logits.
    wrapped_model = ModelWrapper(model)
    wrapped_model.eval()

    input_names = ["input_ids", "attention_mask"]
    output_names = ["logits"]
    # NOTE: position_ids is computed *inside* the wrapper, so it is not a
    # graph input and must not appear in dynamic_axes (torch.onnx.export
    # warns about names that match no input/output).
    dynamic_axes = {
        "input_ids": {0: "batch_size", 1: "sequence_length"},
        "attention_mask": {0: "batch_size", 1: "sequence_length"},
        "logits": {0: "batch_size", 1: "sequence_length"},
    }

    # Export to ONNX.
    torch.onnx.export(
        wrapped_model,
        args=(inputs["input_ids"], inputs["attention_mask"]),
        f=output_path,
        input_names=input_names,
        output_names=output_names,
        dynamic_axes=dynamic_axes,
        opset_version=opset_version,
        export_params=True,
        do_constant_folding=True,
    )
    print(f"模型已成功导出到 {output_path}")

if __name__ == "__main__":
    model_name = "/home/icca/users/general/race/telechat/12B"
    # model_name = "/home/icca/users/general/race/telechat/TeleChat-12B-int8"
    onnx_model_path = "12B.onnx"
    export_model_to_onnx(model_name, onnx_model_path)
    # validate_onnx_model(onnx_model_path, model_name, "请回答你好")

