import traceback
import torch
from transformers import AutoModelForMaskedLM

from p_tuning.data_handle.data_manager import DataManager
from p_tuning.model_tokenizer import ModelTokenizer
from p_tuning.parameter_config import InferenceConfig
from utils.common_utils import convert_logits_to_ids
from utils.label_verbalize import LabelVerbalize

# Padding token and its vocabulary id.
# NOTE(review): neither constant is referenced anywhere in this file's visible
# code — possibly imported by other modules, otherwise dead; confirm before removing.
PAD = '[PAD]'
pad_id = 0


class InferencePTun:
    """Interactive inference driver for the p-tuning masked-LM classifier.

    Loads a pretrained masked language model plus tokenizer, then runs a
    read-predict loop on stdin (see :meth:`start`).
    """

    def __init__(self):
        self.config = InferenceConfig()
        # Prefer GPU when available; inputs are moved to this device per call.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.tokenizer = None        # set by load_model_tokenizer()
        self.model = None            # set by load_model_tokenizer()
        self.history = []            # NOTE(review): never read in this file — possibly dead state
        self.label_verbalize = None  # set by load_model_tokenizer()

    def load_model_tokenizer(self):
        """
        Load the pretrained model, the tokenizer and the label verbalizer.

        :raises ValueError: if model and tokenizer vocabulary sizes disagree.
        :return: None
        """
        self.model = AutoModelForMaskedLM.from_pretrained(self.config.used_model_path)
        self.model.to(self.device).eval()
        self.tokenizer = ModelTokenizer(self.config.tokenizer_path).get_tokenizer()
        # Fail fast when model and tokenizer vocabularies are out of sync.
        # Raise explicitly instead of `assert` so the check survives `python -O`.
        if self.model.config.vocab_size != self.tokenizer.vocab_size:
            raise ValueError(
                f"model vocab_size ({self.model.config.vocab_size}) does not match "
                f"tokenizer vocab_size ({self.tokenizer.vocab_size})"
            )
        self.label_verbalize = LabelVerbalize(
            tokenizer=self.tokenizer,
            max_label_len=self.config.max_label_len
        )

    def inference_run(self, text):
        """
        Classify a single input string and print the predicted label.

        :param text: raw input text to classify.
        :return: the predicted main label for the input.
        """
        with torch.no_grad():
            examples = {'text': [text]}
            tokenized_output = DataManager.convert_example(
                examples,
                self.tokenizer,
                p_embedding_num=self.config.p_embedding_num,
                max_seq_len=self.config.max_seq_len,
                max_label_len=self.config.max_label_len,
                train_mode=False,
                return_tensor=True
            )
            logits = self.model(input_ids=tokenized_output['input_ids'].to(self.device),
                                token_type_ids=tokenized_output['token_type_ids'].to(self.device),
                                attention_mask=tokenized_output['attention_mask'].to(self.device)).logits
            # (batch, label_num)
            predictions = convert_logits_to_ids(logits, tokenized_output['mask_positions']).cpu().numpy().tolist()
            # Map each predicted sub-label back to the main label it belongs to.
            predictions = self.label_verbalize.batch_find_super_label(predictions)
            # Single-example batch: take the first (and only) result.
            predictions = [ele['label'] for ele in predictions][0]
            print(predictions)
            return predictions

    def start(self):
        """Load model/tokenizer, then loop reading stdin and predicting until interrupted."""
        self.load_model_tokenizer()
        while True:
            try:
                text = input("user:")
                self.inference_run(text)
            except (KeyboardInterrupt, EOFError):
                # BUG FIX: the original broad `except` also caught EOFError, so
                # Ctrl-D made the loop spin forever re-raising EOFError.
                # Treat Ctrl-C / Ctrl-D as a clean exit instead.
                break
            except Exception:
                # BUG FIX: traceback.print_exc() returns None, so the original
                # `print(traceback.print_exc())` printed the traceback followed
                # by a spurious "None" line. Keep the loop alive on other errors.
                traceback.print_exc()

def main():
    """Entry point: build the inference driver and start the interactive loop."""
    InferencePTun().start()


# Run the interactive inference loop only when executed as a script.
if __name__ == '__main__':
    main()

