import joblib
from keras_preprocessing.text import Tokenizer

def one_hot(index: int, size: int = 6) -> list:
    """Return a one-hot encoded list of length *size* with a 1 at *index*.

    Generalizes the original hard-coded 6-slot vector: *size* defaults to 6
    to preserve the script's behavior, but can be set to the vocabulary size.

    Raises:
        IndexError: if *index* is outside ``[0, size)`` — an explicit error
            instead of the original's silent negative-index wraparound or
            bare list IndexError.
    """
    if not 0 <= index < size:
        raise IndexError(f"one-hot index {index} out of range for size {size}")
    vec = [0] * size
    vec[index] = 1
    return vec


if __name__ == "__main__":
    # NOTE(review): joblib.load unpickles arbitrary objects — only load
    # tokenizer files from a trusted source.
    t: Tokenizer = joblib.load('./03_tokenizer')

    token = '王力宏'

    # Keras tokenizer indices start at 1 (0 is reserved), so subtract 1
    # to get a 0-based position into the one-hot vector.
    # NOTE(review): assumes the token is in the tokenizer's vocabulary;
    # an OOV token yields an empty sequence and this would raise IndexError.
    token_index = t.texts_to_sequences([token])[0][0] - 1
    print(one_hot(token_index))
