#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File    :   prmlm.py    
@Contact :   cr_lgdx@163.com
@Author  :   lduml
@Modify Time      @Version    @Description
------------      --------    -----------
2020/5/24 10:00 下午    1.0         None
"""
import torch
from transformers import BertTokenizer, BertModel, BertForMaskedLM


def get_predict_mask(model_name, input_tx):
    """Predict the tokens hidden behind the [MASK] positions in *input_tx*.

    Loads a BERT masked-LM checkpoint, runs a single forward pass over the
    (already [CLS]/[SEP]-wrapped) text and prints the whole predicted
    sentence with the special tokens stripped.

    Args:
        model_name: HuggingFace model name or path to a local pretrained
            BERT directory.
        input_tx: Text containing "[CLS]", one or more "[MASK]" tokens and
            a trailing "[SEP]".

    Returns:
        The predicted sentence (special tokens removed) as a single string.
    """
    tokenizer = BertTokenizer.from_pretrained(model_name)
    tokenized_text = tokenizer.tokenize(input_tx)
    indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)

    tokens_tensor = torch.tensor([indexed_tokens])
    # FIX: segment ids were hard-coded to length 47 (and the index ranges
    # below to 46), which only worked for one specific sentence and even
    # then dropped the last position.  Derive everything from the input.
    seq_len = len(indexed_tokens)
    # Single-sentence input, so every position belongs to segment 0.
    segments_tensors = torch.zeros((1, seq_len), dtype=torch.long)

    model = BertForMaskedLM.from_pretrained(model_name)
    model.eval()  # disable dropout so predictions are deterministic
    with torch.no_grad():  # inference only — no gradient graph needed
        outputs = model(tokens_tensor, token_type_ids=segments_tensors)
    # outputs is a tuple; outputs[0] has shape (1, seq_len, vocab_size)
    predictions = outputs[0]

    # Most likely vocabulary id at every position, then drop [CLS]/[SEP].
    predicted_index = [torch.argmax(predictions[0, i]).item() for i in range(seq_len)]
    predicted_token = tokenizer.convert_ids_to_tokens(predicted_index[1:seq_len - 1])

    sentence = ''.join(predicted_token)
    print('Predicted token is:', sentence)
    return sentence


# Demo sentence: the three [MASK] tokens stand for the title of a classic
# Chinese novel (expected answer: 西游记 / "Journey to the West").
input_tx = "[CLS] [MASK] [MASK] [MASK] 是中国神魔小说的经典之作，与《三国演义》《水浒传》《红楼梦》并称为中国古典四大名著。[SEP]"

# Local path to a pretrained Chinese whole-word-masking BERT checkpoint.
model_dir = '/Volumes/diskd/work/model/chinese_wwm_ext_pytorch'

if __name__ == '__main__':
    # Guarded so importing this module no longer triggers a model download
    # and a full forward pass as a side effect.
    # Compare several models here if desired.
    get_predict_mask(model_dir, input_tx)
