import os
import sys
import logging

import torch
import numpy as np
from transformers import ElectraTokenizer, ElectraForQuestionAnswering, ElectraConfig

sys.path.append("..")
import helper

# Export a fine-tuned ELECTRA question-answering model to ONNX with dynamic
# batch/sequence axes, using a single tokenized sample as the trace input.
opsets = 12
model_dir = "../../models/electra"
seq_len = 384

# Configure logging so the logging.info(...) calls below actually emit;
# the root logger defaults to WARNING and would otherwise swallow them.
logging.basicConfig(level=logging.INFO)

tokenizer = ElectraTokenizer.from_pretrained(model_dir)
config = ElectraConfig.from_pretrained(model_dir)
model = ElectraForQuestionAnswering.from_pretrained(model_dir, config=config)
# Put the model in inference mode before tracing: HF models may load in
# train() mode, and dropout active during export corrupts the traced graph.
model.eval()

# NOTE(review): the "summarize:" prefix is a T5-style prompt and looks out of
# place for an ELECTRA QA model -- confirm this is the intended sample text.
contents = "summarize: studies have shown that owning a dog is good for you"
tokens = tokenizer(contents, padding="max_length", max_length=seq_len, return_tensors="pt")  # Batch size 1
input_ids = tokens.input_ids
attention_mask = tokens.attention_mask
token_type_ids = tokens.token_type_ids
logging.debug(f"ids:{input_ids} mask:{attention_mask} token_type_ids:{token_type_ids}")
logging.info(f"ids shape:{input_ids.shape} dtype:{input_ids.dtype}")
logging.info(f"mask shape:{attention_mask.shape} dtype:{attention_mask.dtype}")
logging.info(f"token shape:{token_type_ids.shape} dtype:{token_type_ids.dtype}")

onnx_path = f"{model_dir}/electra.onnx"
# Inputs are cast to int32 -- presumably for a TensorRT consumer, since ONNX
# Runtime HF pipelines normally expect int64; confirm against the downstream
# engine. `enable_onnx_checker` is no longer passed: it was removed in
# torch >= 1.12 (the checker now always runs, matching its old default True).
with torch.no_grad():
    torch.onnx.export(
        model,
        args=(input_ids.int(), attention_mask.int(), token_type_ids.int()),
        f=onnx_path,
        opset_version=opsets,
        input_names=["input_ids", "attention_mask", "token_type_ids"],
        output_names=["start_logits", "end_logits"],
        # Mark batch and sequence dims symbolic so the exported graph accepts
        # arbitrary batch sizes and sequence lengths at inference time.
        dynamic_axes={
            "input_ids": {0: "batch_size", 1: "seq_len"},
            "attention_mask": {0: "batch_size", 1: "seq_len"},
            "token_type_ids": {0: "batch_size", 1: "seq_len"},
            "start_logits": {0: "batch_size", 1: "seq_len"},
            "end_logits": {0: "batch_size", 1: "seq_len"},
        },
    )

