import traceback
import torch
import torch.nn.functional as F
from transformers import GPT2LMHeadModel
from parameter_config import InferenceConfig
from tokenizer import Tokenizer

# Padding token string and its id. Neither name is referenced anywhere in
# this chunk — presumably consumed by code that imports this module;
# TODO(review): confirm external use before removing.
PAD = '[PAD]'
pad_id = 0


class Inference:
    """Interactive GPT-2 chatbot.

    Keeps a rolling multi-turn dialogue history and generates one reply per
    user utterance via sampled decoding (repetition penalty + top-k/top-p
    filtering + multinomial sampling).
    """

    def __init__(self):
        self.config = InferenceConfig()
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.tokenizer = Tokenizer(self.config.tokenizer_path).get_tokenizer()

        self.model = None  # populated by load_model()
        self.history = []  # list of token-id lists, one entry per dialogue turn

    def load_model(self):
        """Load the fine-tuned GPT-2 weights and switch the model to eval mode."""
        model_path = self.config.used_model_path
        self.model = GPT2LMHeadModel.from_pretrained(model_path)
        self.model = self.model.to(self.device)
        self.model.eval()

    def build_input_ids(self, text):
        """Build the flat model input for one turn.

        Appends *text* to the dialogue history, truncates the history to the
        most recent ``max_history_len`` turns, and flattens it into
        ``[CLS] turn1 [SEP] turn2 [SEP] ...``.

        :param text: raw user utterance
        :return: list of token ids
        """
        text_ids = self.tokenizer.encode(text, add_special_tokens=False)
        self.history.append(text_ids)
        # Keep only the most recent turns so the context stays bounded.
        self.history = self.history[-self.config.max_history_len:]
        # Every input starts with [CLS]; each turn is terminated by [SEP].
        input_ids = [self.tokenizer.cls_token_id]
        for turn_ids in self.history:
            input_ids.extend(turn_ids)
            input_ids.append(self.tokenizer.sep_token_id)
        return input_ids

    def inference_run(self, text):
        """Generate, print, and return a reply to *text*.

        :param text: raw user utterance
        :return: the generated reply as a string
        """
        input_ids = self.build_input_ids(text)
        input_ids = torch.tensor(input_ids).long().to(self.device)
        input_ids = input_ids.unsqueeze(0)  # add batch dim -> (1, seq_len)
        response = []  # token ids generated so far for this reply
        # no_grad(): pure inference — without it every generated token grows
        # an autograd graph and memory balloons over a long conversation.
        with torch.no_grad():
            # Generate at most max_token tokens.
            for _ in range(self.config.max_token):
                outputs = self.model(input_ids=input_ids)
                next_token_logits = outputs.logits[0, -1, :]
                # Repetition penalty: damp the logits of tokens already
                # generated in this reply. (renamed from `id`, which
                # shadowed the builtin)
                for token_id in set(response):
                    next_token_logits[token_id] /= self.config.repetition_penalty
                # Never emit [UNK]: force its probability to zero.
                next_token_logits[self.tokenizer.convert_tokens_to_ids('[UNK]')] = -float('Inf')

                filtered_logits = self.top_k_top_p_filtering(next_token_logits, top_k=self.config.top_k,
                                                             top_p=self.config.top_p)

                # multinomial: sample one index from the filtered distribution;
                # higher probability -> higher chance of being drawn.
                next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
                if next_token.item() == self.tokenizer.sep_token_id:
                    break  # [SEP] marks the end of the reply
                response.append(next_token.item())
                input_ids = torch.cat((input_ids, next_token.unsqueeze(0)), dim=1)

        self.history.append(response)
        response_text = ''.join(self.tokenizer.convert_ids_to_tokens(response))
        print(f"chatbot:{response_text}")
        return response_text

    def start(self):
        """Interactive REPL: read a user line, print the bot reply, repeat."""
        self.load_model()
        while True:
            try:
                text = input("user:")
                self.inference_run(text)
            except Exception:
                # BUG FIX: was print(traceback.print_exc()), which printed a
                # spurious 'None' — print_exc() already writes to stderr and
                # returns None.
                traceback.print_exc()

    @staticmethod
    def top_k_top_p_filtering(logits, top_k=0, top_p=None, filter_value=-float('Inf')):
        """Filter a logits distribution using top-k and/or nucleus (top-p) filtering.

        :param logits: 1-D tensor over the vocabulary
        :param top_k: keep only the top_k highest-probability tokens (0 disables)
        :param top_p: keep the smallest set of tokens whose cumulative
                      probability exceeds top_p (None / 0 / >=1 disables)
        :param filter_value: value assigned to removed logits
        :return: the logits tensor, modified in place
        """
        assert logits.dim() == 1  # batch size 1 for now
        top_k = min(top_k, logits.size(-1))  # safety: cannot keep more than vocab size

        if top_k > 0:
            # Remove every token whose logit is below the k-th largest one.
            threshold = torch.topk(logits, top_k)[0][..., -1, None]
            logits[logits < threshold] = filter_value

        # BUG FIX: top_p was accepted (and supplied from config.top_p by the
        # caller) but never applied, so nucleus filtering silently did
        # nothing. Standard implementation:
        if top_p is not None and 0 < top_p < 1.0:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
            # Mark tokens past the nucleus; shift the mask right so the first
            # token that crosses the threshold is always kept.
            sorted_mask = cumulative_probs > top_p
            sorted_mask[..., 1:] = sorted_mask[..., :-1].clone()
            sorted_mask[..., 0] = False
            logits[sorted_indices[sorted_mask]] = filter_value

        return logits


def main():
    """Entry point: build an Inference session and run its interactive loop."""
    Inference().start()


if __name__ == '__main__':
    main()
