from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer
from bert4keras.snippets import to_array
from utils.bert_info import BertInfo

# Masked-language-model demo: mask two tokens of a Chinese sentence and let a
# pretrained BERT (with its MLM head) predict them back.
bert_info_object = BertInfo()  # holds dict_path / config_path / checkpoint_path

tokenizer = Tokenizer(bert_info_object.dict_path, do_lower_case=True)
model = build_transformer_model(
    config_path=bert_info_object.config_path,
    checkpoint_path=bert_info_object.checkpoint_path,
    with_mlm=True,  # attach the MLM head so the model outputs per-token vocab probabilities
)

token_ids, segment_ids = tokenizer.encode('科学是第一生产力')

# Mask token positions 3 and 4 (position 0 is [CLS], so these are the 3rd and
# 4th characters of the sentence). Use the public token_to_id API rather than
# reaching into the private _token_dict.
mask_id = tokenizer.token_to_id('[MASK]')
token_ids[3] = token_ids[4] = mask_id
token_ids, segment_ids = to_array([token_ids], [segment_ids])

# probas has shape (seq_len, vocab_size): token probabilities for the single sequence.
probas = model.predict([token_ids, segment_ids])[0]
# Take the argmax prediction at each of the two masked positions and decode to text.
print(tokenizer.decode(probas[3:5].argmax(axis=1)))