import os

import torch
from torch import nn
import torch_npu
from transformers import AutoModelForCausalLM

from janus.models import MultiModalityCausalLM, VLChatProcessor
from janus.utils.io import load_pil_images
# Export the Janus vision encoder + aligner to ONNX, traced on an Ascend NPU.
torch_npu.npu.set_compile_mode(jit_compile=False)

# Dummy example input for tracing: batch of one 3x384x384 image, cast to fp16
# on the NPU to match the model's dtype/device set below.
# ("input" the builtin is deliberately not shadowed.)
dummy_input = torch.randn(1, 3, 384, 384).half().npu()

model_path = "weights"
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, trust_remote_code=True
)
# Cast the whole model to fp16 once and move it to the NPU in eval mode;
# the submodules below inherit this dtype/device, so no per-module .half().
vl_gpt = vl_gpt.half().npu().eval()

# Export only the vision tower followed by the aligner (the projection into
# the LLM embedding space), chained as a single module.
vision = vl_gpt.vision_model
aligner = vl_gpt.aligner
print(type(aligner))
export_model = nn.Sequential(vision, aligner)

onnx_path = "./models/vision.onnx"
# torch.onnx.export does not create missing directories — ensure it exists.
os.makedirs(os.path.dirname(onnx_path), exist_ok=True)

with torch.no_grad():  # tracing needs no autograd state
    torch.onnx.export(
        export_model,              # model to export
        dummy_input,               # example input used for tracing
        onnx_path,                 # destination ONNX file path
        export_params=True,        # store trained weights in the ONNX file
        opset_version=14,          # ONNX operator-set version
        do_constant_folding=True,  # fold constant subgraphs at export time
        input_names=['input'],     # name of the input tensor
        output_names=['output'],   # name of the output tensor
    )
