# -*- coding: utf-8 -*-
"""
@date: 2020/12/4 15:00
@file: base_use.py
@author: lilong
@desc: Basic usage example: build a BERT tokenizer from a pretrained vocabulary.
"""
from bert_torch.tokenizers import Tokenizer

# Location of the pretrained Chinese BERT checkpoint (L-12, H-768, A-12).
_PRETRAINED_DIR = '../premodel/chinese_L-12_H-768_A-12'

# Public path constants consumed by the tokenizer/model builders below.
config_path = _PRETRAINED_DIR + '/bert_config.json'
checkpoint_path = _PRETRAINED_DIR + '/bert_model.ckpt'
dict_path = _PRETRAINED_DIR + '/vocab.txt'


if __name__ == '__main__':
    # Build the tokenizer from the pretrained vocabulary.
    # NOTE(review): `simplified=True` presumably trims the vocabulary to a
    # simplified token set — confirm against the Tokenizer implementation.
    tokenizer = Tokenizer(dict_path, do_lower_case=True, simplified=True)

    # Example follow-up usage, kept for reference (requires
    # build_transformer_model / to_array, which this file does not import):
    # model = build_transformer_model(config_path, checkpoint_path)  # build the model, load weights

    # # Encoding smoke test
    # token_ids, segment_ids = tokenizer.encode(u'语言模型')
    # token_ids, segment_ids = to_array([token_ids], [segment_ids])
    #
    # print('\n ===== predicting =====\n')
    # print(model.predict([token_ids, segment_ids]))
