import os
import sys
import time
import logging

import torch
import numpy as np
from transformers import ElectraTokenizer, ElectraForQuestionAnswering, ElectraConfig

from .. import helper


class TorchInfer(object):
    """Extractive question-answering inference with an ELECTRA model.

    Loads the tokenizer and ``ElectraForQuestionAnswering`` model from
    *model_dir* and exposes :meth:`infer`, which maps raw text input to
    ``(start_logits, end_logits)`` numpy arrays of shape (batch, seq_len).
    """

    def __init__(self, model_dir) -> None:
        # Fixed tokenization length: inputs are padded to this length
        # (padding="max_length" in _preproc).
        self.seq_len = 384

        self.tokenizer = ElectraTokenizer.from_pretrained(model_dir)
        config = ElectraConfig.from_pretrained(model_dir)
        self.model = ElectraForQuestionAnswering.from_pretrained(model_dir, config=config)
        # from_pretrained already returns the model in eval mode; make the
        # inference intent explicit (keeps dropout off if that ever changes).
        self.model.eval()

    def _preproc(self, data):
        """Tokenize *data* into fixed-length model inputs.

        Args:
            data: text input accepted by the tokenizer (a question/context
                string or pair — format depends on the caller).

        Returns:
            Tuple of torch tensors ``(input_ids, attention_mask,
            token_type_ids)``, each shaped (batch, seq_len).
        """
        tokens = self.tokenizer(data, padding="max_length", max_length=self.seq_len, return_tensors="pt")  # Batch size 1
        input_ids = tokens.input_ids
        attention_mask = tokens.attention_mask
        token_type_ids = tokens.token_type_ids

        # Lazy %-style args: the message is only formatted if INFO is enabled.
        logging.info("ids shape:%s %s", input_ids.shape, input_ids.dtype)
        logging.info("mask shape:%s %s", attention_mask.shape, attention_mask.dtype)

        return input_ids, attention_mask, token_type_ids

    def infer(self, contents):
        """Run the QA model on *contents* and return raw span logits.

        Args:
            contents: text input forwarded to :meth:`_preproc`.

        Returns:
            Tuple ``(start_logits, end_logits)`` as numpy arrays of shape
            (batch, seq_len).
        """
        input_ids, attention_mask, token_type_ids = self._preproc(contents)

        # no_grad avoids building the autograd graph and leaves the outputs
        # detached, so the .numpy() conversions below are valid.
        with torch.no_grad():
            time_start = time.time()
            result = self.model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
            infer_time = (time.time() - time_start) * 1000
            start_logits = result.start_logits
            end_logits = result.end_logits
            logging.info("shape start_logits:%s", start_logits.shape)
            logging.info("shape end_logits:%s", end_logits.shape)
            logging.info("infer time:%.3f (ms)", infer_time)

        return start_logits.numpy(), end_logits.numpy()