'''
Author: zhao-leo 18055219130@163.com
Date: 2024-10-22 21:26:15
LastEditTime: 2024-10-22 22:45:10
'''
from transformers import AutoTokenizer, AutoModelForMaskedLM
import torch
from .basic_model import BasicModel

# model_path = './.cache/models/bert-base-chinese/'
class bertModel(BasicModel):
  """Masked-language-model wrapper around a local BERT checkpoint.

  The tokenizer and model are loaded lazily on the first call to
  ``main_logic`` and cached on the instance, so repeated calls do not
  re-read the checkpoint from disk.
  """

  def __init__(self, model_path: str):
    super().__init__(model_path)
    # Cache slots, populated on first use by _ensure_loaded().
    self._tokenizer = None
    self._model = None

  def _ensure_loaded(self):
    """Load and cache tokenizer/model from self.model_path (idempotent)."""
    if self._tokenizer is None:
      self._tokenizer = AutoTokenizer.from_pretrained(self.model_path)
    if self._model is None:
      # from_pretrained returns the model in eval mode, suitable for inference.
      self._model = AutoModelForMaskedLM.from_pretrained(self.model_path)

  def main_logic(self, input_text: str = "欢迎来[MASK]里。", top_k: int = 5) -> str:
    """Predict the most likely fillers for the masked position in *input_text*.

    Args:
      input_text: Sentence containing a mask token (defaults to the
        original demo sentence). If several masks are present, only the
        first is scored.
      top_k: Number of candidate tokens to report.

    Returns:
      A string with one "Token: ..., Probability: ..." line per candidate,
      ordered from most to least probable.

    Raises:
      ValueError: If *input_text* contains no mask token.
    """
    self._ensure_loaded()
    tokenizer = self._tokenizer
    model = self._model

    inputs = tokenizer(input_text, return_tensors="pt")

    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
      outputs = model(**inputs)

    # Use the tokenizer's own mask id rather than a hard-coded '[MASK]'
    # string lookup — robust across tokenizer variants.
    mask_positions = torch.where(inputs.input_ids == tokenizer.mask_token_id)[1]
    if mask_positions.numel() == 0:
      raise ValueError("input_text contains no mask token")
    # The original code assumed exactly one mask (.item() would raise on
    # several); score the first occurrence instead of crashing.
    mask_index = mask_positions[0].item()

    # Logits at the masked position -> probability distribution over the vocab.
    mask_logits = outputs.logits[0, mask_index]
    mask_probabilities = torch.nn.functional.softmax(mask_logits, dim=-1)

    # Highest-probability candidate tokens.
    topk_probabilities, topk_indices = torch.topk(mask_probabilities, top_k)

    result = ""
    for index, prob in zip(topk_indices.tolist(), topk_probabilities.tolist()):
      token = tokenizer.decode(index)
      result += f"Token: {token}, Probability: {prob:.4f}\n"
    return result