import os
import sys
from typing import List
import torch
import transformers
import torch_npu
from transformers import LlamaForCausalLM, LlamaTokenizer
import torch_npu
from transformers import AutoModelForCausalLM
from janus.models import MultiModalityCausalLM, VLChatProcessor

def export_onnx(
    output_dir: str = "models/Janus-Embedding.onnx",
):
    """Export the Janus model's token-embedding layer to ONNX.

    Loads the ``MultiModalityCausalLM`` checkpoint from the local
    ``weights`` directory onto an Ascend NPU in fp16, extracts the
    language model's input-embedding module, and exports it with
    dynamic batch and sequence-length axes.

    Args:
        output_dir: Destination path for the exported ``.onnx`` file.
            Missing parent directories are created automatically.
    """
    # Tracing requires eager execution; disable NPU JIT compilation.
    torch_npu.npu.set_compile_mode(jit_compile=False)

    model_path = "weights"
    vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
        model_path, trust_remote_code=True
    )
    vl_gpt = vl_gpt.to(torch.half).npu().eval()
    # Only the embedding lookup is exported, not the full language model.
    model = vl_gpt.language_model.get_input_embeddings()

    input_names = ["input_ids"]
    output_names = ["input_embeds"]
    # Both batch size and sequence length stay dynamic at inference time.
    dynamic_axes = {
        "input_ids": {0: "batch_size", 1: "seq_length"},
    }

    # Dummy tracing input; the concrete shape (1, 634) is arbitrary since
    # both axes are declared dynamic above.
    input_ids = torch.zeros((1, 634), dtype=torch.long).to("npu")

    # torch.onnx.export does not create directories — ensure the parent
    # of output_dir exists before writing.
    parent = os.path.dirname(output_dir)
    if parent:
        os.makedirs(parent, exist_ok=True)

    with torch.no_grad():
        torch.onnx.export(
            model,
            # Bug fix: original `(input_ids)` was a bare tensor, not the
            # intended one-element tuple of example inputs.
            args=(input_ids,),
            f=output_dir,
            input_names=input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            opset_version=13,
            export_params=True,
        )
# Script entry point: run the export with the default output path.
if __name__ == "__main__":
    export_onnx()
