import torch
from transformers import GPT2Config, GPT2LMHeadModel, GPT2TokenizerFast
from transformers import BertTokenizerFast
from parameter_config import ParameterConfig
from utils.top import top_K_P_filtering

# Load hyperparameters / paths (device, vocab_path, top_k, max_len, etc.)
# from the project-local config object.
params = ParameterConfig()

# tokenizer = GPT2TokenizerFast(vocab_file=params.vocab_path,
#                               merges_file=params.merges_file,
#                               sep_token="[SEP]",
#                               pad_token="[PAD]",
#                               cls_token="[CLS]")
# BERT-style WordPiece tokenizer over the project vocab; presumably a
# Chinese character-level vocabulary, which is why BertTokenizerFast is
# used with a GPT-2 LM head — TODO confirm against vocab file.
tokenizer = BertTokenizerFast(vocab_file=params.vocab_path,
                              sep_token="[SEP]",
                              pad_token="[PAD]",
                              cls_token="[CLS]")
# model = GPT2LMHeadModel.from_pretrained('./save_model/run/min_loss_model_24')
# Load the fine-tuned checkpoint (lowest-loss epoch) and switch to eval
# mode so dropout/norm layers behave deterministically at inference time.
model = GPT2LMHeadModel.from_pretrained('./save_model/run/min_loss_model_24')
model = model.to(params.device)
model.eval()


def model_predict(text):
    """Generate a chatbot reply for *text* with the fine-tuned GPT-2 model.

    The utterance is tokenized, wrapped as ``[CLS] text [SEP]`` and fed to
    the model; reply tokens are sampled autoregressively (top-k filtering +
    multinomial sampling) until ``[SEP]`` is produced or ``params.max_len``
    tokens have been generated.

    Args:
        text: The user utterance (plain string).

    Returns:
        The generated reply as one string (tokens joined without spaces,
        which suits a character-level Chinese vocabulary).
    """
    # NOTE(review): history_token is rebuilt on every call, so only the
    # current utterance ever contributes context; real multi-turn history
    # would need state kept across calls by the caller.
    history_token = [tokenizer.encode(text, add_special_tokens=False)]

    input_ids = [tokenizer.cls_token_id]  # every input starts with [CLS] (id 101)
    for history_chat in history_token[-params.max_history_len:]:
        input_ids.extend(history_chat)
        input_ids.append(tokenizer.sep_token_id)  # turns are separated by [SEP] (id 102)
    input_ids = torch.tensor(input_ids).long().to(params.device).unsqueeze(0)

    response = []
    unk_id = tokenizer.convert_tokens_to_ids('[UNK]')  # hoisted: loop-invariant lookup
    with torch.no_grad():  # pure inference — skip autograd bookkeeping
        for _ in range(params.max_len):
            outputs = model(input_ids=input_ids)
            next_token_logits = outputs.logits[0, -1, :]  # logits for the next position
            # Repetition penalty (CTRL, Keskar et al. 2019): shrink positive
            # logits and amplify negative ones so an already-emitted token
            # always becomes LESS likely. Plain division (the previous code)
            # wrongly boosted tokens whose logit was negative.
            for token_id in set(response):
                if next_token_logits[token_id] > 0:
                    next_token_logits[token_id] /= params.repetition_penalty
                else:
                    next_token_logits[token_id] *= params.repetition_penalty
            next_token_logits[unk_id] = -float('Inf')  # never emit [UNK]
            filter_logits = top_K_P_filtering(next_token_logits, top_k=params.top_k)
            next_token = torch.multinomial(torch.softmax(filter_logits, dim=-1), num_samples=1)
            if next_token.item() == tokenizer.sep_token_id:  # [SEP] marks end of reply
                break
            response.append(next_token.item())
            # next_token is shape (1,); unsqueeze to (1, 1) to concat on seq dim.
            input_ids = torch.cat((input_ids, next_token.unsqueeze(0)), dim=1)

    history_token.append(response)
    return "".join(tokenizer.convert_ids_to_tokens(response))
