import numpy as np

from .model import vocabulary, context_limit, model_name, model, vocab_name
from .tokenizer import tokenizer

# Build id2word: a module-level lookup table mapping each token id
# (0 .. vocabulary-1) to its decoded text, so model outputs can be turned
# back into strings without calling the tokenizer per query.
print('loading id2word')
# NOTE(review): token_texts is called with a single-element array per id, so
# each entry of id2word is whatever token_texts returns for one id —
# presumably a one-element sequence rather than a bare string. Confirm
# whether callers expect the element itself; if so, unwrap with [0] here.
id2word = [
    tokenizer.token_texts(np.array([i]))
    for i in range(vocabulary)
]
print('id2word loaded')
