import traceback
import torch
from transformers import AutoModelForMaskedLM

from pet.data_handle.data_manager import DataManager
from pet.data_handle.template import HardTemplate
from pet.model_tokenizer import ModelTokenizer
from pet.parameter_config import InferenceConfig
from utils.common_utils import convert_logits_to_ids
from utils.label_verbalize import LabelVerbalize

# BERT-style padding token and its conventional vocabulary id.
# NOTE(review): neither constant is referenced in this module as shown —
# presumably imported by other modules; verify before removing.
PAD = '[PAD]'
pad_id = 0


class InferencePet:
    """Interactive inference wrapper for a PET (pattern-exploiting) classifier.

    Loads a pretrained masked-LM checkpoint and its tokenizer, converts raw
    text through a hard prompt template, and maps the tokens predicted at the
    [MASK] positions back to task labels via a verbalizer.
    """

    def __init__(self):
        self.config = InferenceConfig()
        # Prefer GPU when one is available.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.hard_template = HardTemplate(prompt=self.config.prompt)
        self.tokenizer = None        # set in load_model_tokenizer()
        self.model = None            # set in load_model_tokenizer()
        self.history = []            # conversation history (unused in this module)
        self.label_verbalize = None  # set in load_model_tokenizer()

    def load_model_tokenizer(self):
        """Load the pretrained model weights and the tokenizer.

        Raises:
            ValueError: if the model and tokenizer vocabulary sizes disagree,
                which would make predicted token ids meaningless.
        """
        self.model = AutoModelForMaskedLM.from_pretrained(self.config.used_model_path)
        self.model.to(self.device).eval()
        self.tokenizer = ModelTokenizer(self.config.tokenizer_path).get_tokenizer()
        # Explicit exception instead of `assert`: asserts are stripped when
        # running under `python -O`, silently skipping this sanity check.
        if self.model.config.vocab_size != self.tokenizer.vocab_size:
            raise ValueError(
                f'model vocab size ({self.model.config.vocab_size}) does not '
                f'match tokenizer vocab size ({self.tokenizer.vocab_size})'
            )
        self.label_verbalize = LabelVerbalize(
            tokenizer=self.tokenizer,
            max_label_len=self.config.max_label_len
        )

    def inference_run(self, text):
        """Predict the main label for a single input sentence.

        :param text: raw input text.
        :return: the predicted top-level label for the (single) example.
        """
        with torch.no_grad():
            examples = {'text': [text]}
            tokenized_output = DataManager.convert_example(
                examples,
                self.tokenizer,
                hard_template=self.hard_template,
                max_seq_len=self.config.max_seq_len,
                max_label_len=self.config.max_label_len,
                train_mode=False,
                return_tensor=True
            )
            logits = self.model(
                input_ids=tokenized_output['input_ids'].to(self.device),
                token_type_ids=tokenized_output['token_type_ids'].to(self.device),
                attention_mask=tokenized_output['attention_mask'].to(self.device)
            ).logits
            # Token ids predicted at the mask positions: (batch, label_num).
            predictions = convert_logits_to_ids(
                logits, tokenized_output['mask_positions']
            ).cpu().numpy().tolist()
            # Map each predicted sub-label to its parent (main) label.
            predictions = self.label_verbalize.batch_find_super_label(predictions)
            predictions = [ele['label'] for ele in predictions][0]
            print(predictions)
            return predictions

    def start(self):
        """Load the model, then loop forever reading user input and predicting."""
        self.load_model_tokenizer()
        while True:
            try:
                text = input("user:")
                self.inference_run(text)
            except Exception:
                # Bug fix: traceback.print_exc() writes the traceback to
                # stderr and returns None, so the old print(...) wrapper
                # only added a spurious "None" line.
                traceback.print_exc()

    @staticmethod
    def top_k_top_p_filtering(logits, top_k=0, top_p=None, filter_value=-float('Inf')):
        """Filter a logits distribution using top-k and/or nucleus (top-p) filtering.

        Args:
            logits: 1-D tensor of logits over the vocabulary (modified in place).
            top_k: if > 0, keep only the `top_k` highest-probability tokens.
            top_p: if not None, keep the smallest set of tokens whose cumulative
                probability exceeds `top_p` (nucleus filtering). Previously this
                parameter was accepted but silently ignored; it is now applied.
            filter_value: value assigned to removed logits (default -inf).

        Returns:
            The filtered logits tensor (same object as `logits`).
        """
        # Batch size 1 for now — could be extended, but the code would be less clear.
        assert logits.dim() == 1
        # Safety check: top_k must not exceed the vocabulary size.
        top_k = min(top_k, logits.size(-1))

        if top_k > 0:
            # Remove every token whose logit is below the k-th largest logit.
            threshold = torch.topk(logits, top_k)[0][..., -1, None]
            logits[logits < threshold] = filter_value

        if top_p is not None:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            cumulative_probs = torch.cumsum(
                torch.softmax(sorted_logits, dim=-1), dim=-1
            )
            # Remove tokens with cumulative probability above the threshold.
            sorted_indices_to_remove = cumulative_probs > top_p
            # Shift right so the first token above the threshold is kept —
            # this guarantees at least one token always survives.
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = False
            indices_to_remove = sorted_indices[sorted_indices_to_remove]
            logits[indices_to_remove] = filter_value

        return logits


def main():
    """Entry point: build the PET inference wrapper and start the input loop."""
    InferencePet().start()


# Run the interactive inference loop only when executed as a script.
if __name__ == '__main__':
    main()
