import re

class Tokenizer:
    """Regex-based word/punctuation tokenizer over a fixed vocabulary.

    Splits text with a delimiter regex (the capturing group keeps the
    delimiters themselves as tokens) and maps each token to an integer id.

    Args:
        vocab: mapping from token string to integer id.
        delimiter: regex pattern used by ``encode`` to split text; must
            contain a capturing group so delimiters are retained as tokens.
    """

    def __init__(self, vocab, delimiter=r'([,.:;?!_()"\']|--|\s)'):
        self.str_to_int = vocab
        # Inverse mapping (id -> token); assumes vocab values are unique,
        # otherwise later entries silently overwrite earlier ones.
        self.int_to_str = {v: k for k, v in vocab.items()}
        # Keep the raw pattern for backward compatibility with any caller
        # that reads `self.delimiter`; compile once so encode() skips the
        # re-module cache lookup on every call.
        self.delimiter = delimiter
        self._pattern = re.compile(delimiter)

    def encode(self, text):
        """Split ``text`` and return the list of vocabulary ids.

        Whitespace-only and empty split pieces are discarded.

        Raises:
            KeyError: if a token is not present in the vocabulary.
        """
        # Strip each piece exactly once (the original stripped twice).
        stripped = (piece.strip() for piece in self._pattern.split(text))
        return [self.str_to_int[token] for token in stripped if token]

    def decode(self, tokens):
        """Map a sequence of integer ids back to a list of token strings.

        Raises:
            KeyError: if an id is not present in the vocabulary.
        """
        return [self.int_to_str[token] for token in tokens]

    def get_vocab(self):
        # NOTE(review): despite the name, this returns the id -> token
        # mapping, not the token -> id `vocab` passed to __init__ —
        # confirm callers expect this direction before renaming/changing.
        return self.int_to_str

    def n_vocab(self):
        """Return the number of entries in the vocabulary."""
        return len(self.int_to_str)

