
import pickle
from keras.preprocessing.text import Tokenizer

from src.utils.log_util import logger
from src.lstm.config import Config


# Module-level cache for the pickled tokenizer; populated lazily by
# load_tokenizer() on first call and reused afterwards.
_tokenizer = None

def get_tokenizer(data):
    """Build and return a new Keras ``Tokenizer`` fitted on *data*.

    Args:
        data: iterable of texts used to build the tokenizer vocabulary.

    Returns:
        A freshly fitted ``Tokenizer`` instance.
    """
    fitted = Tokenizer()
    fitted.fit_on_texts(data)
    return fitted


def load_tokenizer():
    """Return the tokenizer deserialized from ``Config.tokenizer_path``.

    The tokenizer is unpickled at most once and cached in the module-level
    ``_tokenizer``; subsequent calls return the cached instance.

    Returns:
        The cached (or freshly loaded) tokenizer object.
    """
    global _tokenizer
    # `is None` instead of truthiness: a valid-but-falsy cached object must
    # not trigger a reload from disk.
    if _tokenizer is None:
        # Use the project logger (matches save_tokenizer) rather than print.
        logger.info("tokenizer load {}".format(Config.tokenizer_path))
        # NOTE(review): pickle.load on this path assumes the file is trusted
        # project output — never point Config.tokenizer_path at external data.
        with open(Config.tokenizer_path, mode='rb') as fr:
            _tokenizer = pickle.load(fr)
    return _tokenizer

    
def save_tokenizer(tokenizer):
    """Serialize *tokenizer* with pickle to ``Config.tokenizer_path``.

    Args:
        tokenizer: the fitted tokenizer object to persist.
    """
    target_path = Config.tokenizer_path
    with open(target_path, mode='wb') as out_file:
        pickle.dump(tokenizer, out_file)
    logger.info("tokenizer write {}".format(target_path))