import os
import sys
from typing import List
import torch
import transformers
import torch_npu
from transformers import LlamaForCausalLM, LlamaTokenizer
import torch_npu
from transformers import AutoModelForCausalLM
from janus.models import MultiModalityCausalLM, VLChatProcessor

def export_onnx(
    base_model: str = "models/llama",
    output_dir: str = "models/Janus-LLM.onnx",
    model_path: str = "weights",
):
    """Export the Janus language model (LLaMA-style backbone) to ONNX for NPU use.

    The exported graph takes pre-computed input embeddings (decode step of
    length 1) plus a KV cache, matching a single autoregressive decoding step.

    Args:
        base_model: Legacy parameter, currently unused; kept so existing
            callers keep working.
        output_dir: Destination path for the exported ``.onnx`` file.
        model_path: Directory holding the pretrained Janus weights and the
            VLChatProcessor config (previously hard-coded to ``"weights"``).
    """
    # Disable JIT compilation on the NPU backend before tracing/export.
    torch_npu.npu.set_compile_mode(jit_compile=False)

    # Loading the processor validates model_path; its tokenizer is not needed
    # for the export itself.
    vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)

    vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
        model_path, trust_remote_code=True
    )
    vl_gpt = vl_gpt.to(torch.half).npu().eval()
    # Only the language-model component is exported, not the vision parts.
    model = vl_gpt.language_model

    input_names = ["attention_mask", "position_ids", "past_key_values", "input_embeds"]
    output_names = ["logits", "out_key_values"]
    # Only names appearing in input_names/output_names may be keyed here; the
    # previous version also listed "input_ids", which is not an export input.
    dynamic_axes = {
        "attention_mask": {1: "all_len"},
        "position_ids": {1: "seq_length"},
        "past_key_values": {4: "kv_len"},
        "input_embeds": {1: "seq_length"},
    }

    cfg = model.model.config
    # One new token per step against a 1024-token KV cache.
    batch_size, seq_len, kv_len = 1, 1, 1024
    all_len = seq_len + kv_len
    n_layers, n_heads = cfg.num_hidden_layers, cfg.num_key_value_heads
    head_dim = cfg.hidden_size // cfg.num_attention_heads

    # Dummy tracing inputs. The embedding dimension comes from the config
    # instead of the previously hard-coded 2048.
    input_embeds = torch.zeros((batch_size, seq_len, cfg.hidden_size)).half().to("npu")
    attention_mask = torch.zeros((batch_size, all_len)).long().to("npu")
    position_ids = torch.zeros((batch_size, seq_len)).long().to("npu")
    past_key_values = torch.rand(
        (n_layers, 2, batch_size, n_heads, kv_len, head_dim), dtype=torch.float16
    ).to("npu")

    # Positional arguments in the order the language model's forward() expects
    # (presumably input_ids, attention_mask, position_ids, past_key_values,
    # inputs_embeds, labels, use_cache, output_attentions — unchanged from the
    # original call; confirm against the model's forward signature).
    input_args = (
        None,             # input_ids: embeddings are supplied directly instead
        attention_mask,
        position_ids,
        past_key_values,
        input_embeds,
        None,             # labels
        True,             # use_cache
        False,            # output_attentions
    )

    model.eval()
    with torch.no_grad():
        torch.onnx.export(
            model,
            f=output_dir,
            args=input_args,
            input_names=input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            opset_version=14,
            export_params=True,
        )


def _main() -> None:
    """Script entry point: run the ONNX export with default paths."""
    export_onnx()


if __name__ == "__main__":
    _main()
