import os
import sys
import time
import logging

import torch
import numpy as np
from transformers import T5Tokenizer, T5ForConditionalGeneration

from .. import helper


def update_eosflag(
    eos_flag: torch.LongTensor,
    is_eos: torch.BoolTensor):
    """Clear the "still generating" flag for sequences that just emitted EOS.

    Entries of ``eos_flag`` are zeroed where ``is_eos`` is True and kept
    unchanged where it is False; the input tensor is not modified.
    """
    return torch.where(is_eos, torch.zeros_like(eos_flag), eos_flag)


class TorchInfer():
    """Greedy step-by-step T5 inference using the raw encoder / decoder /
    lm_head sub-modules instead of ``model.generate``."""

    def __init__(self, model_dir) -> None:
        # Decoder budget: position 0 holds the start token, so at most
        # max_dec_num - 1 tokens are generated.
        self.max_dec_num = 32
        # Encoder inputs are padded / truncated to this fixed length.
        self.seq_len = 128

        self.tokenizer = T5Tokenizer.from_pretrained(model_dir)
        self.model = T5ForConditionalGeneration.from_pretrained(model_dir)

    def _preproc(self, data):
        """Tokenize ``data`` into fixed-length ``(batch, seq_len)`` id and
        attention-mask tensors."""
        tokens = self.tokenizer(data, padding="max_length", max_length=self.seq_len, return_tensors="pt")  # Batch size 1
        input_ids = tokens.input_ids
        attention_mask = tokens.attention_mask
        logging.info(f"ids shape:{input_ids.shape} {input_ids.dtype}")
        logging.info(f"mask shape:{attention_mask.shape} {attention_mask.dtype}")

        return input_ids, attention_mask

    def infer(self, contents):
        """Greedy-decode ``contents``.

        Returns the decoder token ids as a numpy array of shape
        ``(batch, max_dec_num)``; positions after EOS are filled with pad (0).
        """
        input_ids, attention_mask = self._preproc(contents)

        # T5 defaults: </s> == 1, <pad> == 0 (pad also serves as the decoder
        # start token). NOTE(review): assumes the checkpoint keeps these
        # defaults -- confirm, or read them from self.model.config.
        eos_token_id = 1
        pad_token_id = 0
        with torch.no_grad():
            enc_start = time.time()
            # Encode once; hidden states are reused for every decode step.
            enc_hidden_state = self.model.encoder(input_ids=input_ids, attention_mask=attention_mask)[0]
            enc_time = (time.time() - enc_start) * 1000
            logging.debug(f"shape encoder:{enc_hidden_state.shape}")
            logging.info(f"encoder time:{enc_time:.3f} (ms)")

            batch_size = input_ids.shape[0]
            # 1 while a sequence is still generating, 0 once it emitted EOS.
            eos_flag = input_ids.new_ones(batch_size)
            # BUGFIX: was torch.zeros(1, ...) -- a hard-coded batch of 1 that
            # mismatched eos_flag (sized by input_ids) whenever batch > 1.
            dec_ids = torch.zeros(batch_size, self.max_dec_num).int()

            steps = 0  # number of decode iterations actually executed
            dec_start = time.time()
            # BUGFIX: iterate only up to max_dec_num - 1. The original
            # range(self.max_dec_num) wrote dec_ids[:, self.max_dec_num] on
            # the final step, raising IndexError whenever EOS never appeared.
            for i in range(self.max_dec_num - 1):
                start_time = time.time()
                # Full (padded) decoder input each step; the decoder's causal
                # mask keeps positions > i from influencing position i.
                dec_hidden_state = self.model.decoder(input_ids=dec_ids,
                                            encoder_attention_mask=attention_mask,
                                            encoder_hidden_states=enc_hidden_state)[0]

                dec_time = (time.time() - start_time) * 1000
                start_time = time.time()
                # NOTE: optimization point -- only the current position is
                # projected, but the decoder above still recomputes every
                # position each step (no KV cache).
                # NOTE(review): T5 with tied embeddings rescales hidden states
                # by d_model**-0.5 before lm_head; argmax is scale-invariant,
                # so greedy decoding is unaffected by omitting it here.
                logits = self.model.lm_head(dec_hidden_state[:, i, :])
                lm_time = (time.time() - start_time) * 1000
                logging.debug(f"shape decoder:{dec_hidden_state.shape} lm:{logits.shape}")
                logging.debug(f"time decoder:{dec_time:.3f} (ms) lm_head:{lm_time:.3f} (ms)")

                next_tokens = torch.argmax(logits, dim=-1)
                logging.debug(f"argmax next_tokens:{next_tokens}")
                ### base value: 21126, 43, 2008, 24, 293, 53, 3, 9, 1782, 19, 207, 21, 25, 3, 5, 1

                # Sequences that already hit EOS keep emitting pad tokens.
                next_tokens = next_tokens * eos_flag + pad_token_id * (1 - eos_flag)
                dec_ids[:, i + 1] = next_tokens
                steps += 1

                eos_flag = update_eosflag(eos_flag, next_tokens == eos_token_id)
                if eos_flag.max() == 0:
                    break

            dec_time = (time.time() - dec_start) * 1000
            # max(steps, 1) guards against max_dec_num <= 1 (loop never runs),
            # which previously raised NameError on `i` / ZeroDivisionError.
            avg_time = dec_time / max(steps, 1)
            logging.info(f"dec num:{steps} dec_all:{dec_time:.3f} (ms) dec_avg: {avg_time:.3f} (ms)")
            logging.debug(f"dec result:{dec_ids}")

        return dec_ids.numpy()