from bert4keras.backend import keras
from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer
import numpy as np
import os
# Root directory of the pretrained (uncased) BERT checkpoint.
BertPath = r'E:\nlp-data\pretrain_model\bert\model_uncase'

# Derive the three standard BERT asset paths from the checkpoint root.
config_path, checkpoint_path, dict_path = (
    os.path.join(BertPath, fname)
    for fname in ('bert_config.json', 'bert_model.ckpt', 'vocab.txt')
)

# WordPiece tokenizer over the checkpoint's vocab; lower-cases input to
# match the uncased model.
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# Build the Transformer and load the pretrained TF checkpoint weights.
model = build_transformer_model(config_path, checkpoint_path)

# Tokenize the sample text into token ids and segment ids.
token_ids, segment_ids = tokenizer.encode('语言模型')

# Wrap the single example as a batch of one; the same inputs are reused
# for the prediction after reloading, so build them once.
batch_inputs = [np.array([token_ids]), np.array([segment_ids])]

print('\n ===== predicting =====\n')
print(model.predict(batch_inputs))

print('\n ===== reloading and predicting =====\n')
# Round-trip the model through disk to verify that a plain
# keras.models.load_model restores it (bert4keras registers its custom
# layers as Keras custom objects), then predict again on the same batch.
model.save('test.model')
del model
model = keras.models.load_model('test.model')
print(model.predict(batch_inputs))