from .model import model, context_limit, model_name, vocabulary, vocab_name
from .tokenizer import tokenizer
import numpy as np

print('loading id2word')
# Build the id -> token-text lookup table once at module load so that
# decoding token ids later doesn't have to go through the tokenizer.
# NOTE(review): token_texts is invoked once per id with a 1-element array;
# if it accepts batched input, one call on np.arange(vocabulary) would be
# far faster — confirm its contract before batching.
id2word = [tokenizer.token_texts(np.array([i])) for i in range(vocabulary)]
print('id2word loaded')
