import re

# text = "Hello, world. This. is a test"
# text = "Hello, world. Is this-- a test?"

# ['Hello,', ' ', 'world.', ' ', 'This.', ' ', 'is', ' ', 'a', ' ', 'test']
# result = re.split(r'(\s)', text)

# ['Hello', ',', '', ' ', 'world', '.', '', ' ', 'This', '.', '', ' ', 'is', ' ', 'a', ' ', 'test']
# result = re.split(r'([,.]|\s)', text)

# result = re.split(r'([,.:;?_!"()\']|--|\s)', text)

# ['Hello', ',', 'world', '.', 'This', '.', 'is', 'a', 'test']
# result = [item for item in result if item.strip()]

# print(result)

# preprocessed = re.split(r'([,.:;?_!"()\']|--|\s)', text)
# preprocessed = [item for item in preprocessed if item.strip()]

# all_words = sorted(set(preprocessed))

# the vocabulary contains all unique tokens.
# every token is mapped to a unique number called its token id.
# the vocabulary is sorted alphabetically.
# vocabulary = {token: integer for integer, token in enumerate(all_words)}


class SimpleTokenizerV1(object):
    """Minimal word-level tokenizer backed by a fixed vocabulary.

    Maps text to token ids (`encode`) and token ids back to text
    (`decode`). Tokens that are missing from the vocabulary raise a
    KeyError — there is no unknown-token fallback in this version.
    """

    def __init__(self, vocabulary):
        # vocabulary: dict mapping each unique token (str) to a unique
        # integer token id; expected to be built from an alphabetically
        # sorted token list.
        self.str_to_int = vocabulary
        # inverse mapping so decode() can turn ids back into tokens
        self.int_to_str = {token_id: token for token, token_id in vocabulary.items()}

    def encode(self, text):
        """Split `text` into tokens and return their token ids."""
        # split on punctuation, "--", or whitespace; the capturing group
        # keeps the separators themselves as tokens
        pieces = re.split(r'([,.:;?_!"()\']|--|\s)', text)
        tokens = [piece for piece in pieces if piece.strip()]
        return [self.str_to_int[token] for token in tokens]

    def decode(self, ids):
        """Rebuild text from token ids, tightening space before punctuation."""
        text = " ".join(self.int_to_str[token_id] for token_id in ids)
        # drop the space that join() inserted before punctuation marks
        return re.sub(r'\s+([,.?!"()\'])', r'\1', text)


def make_vocabulary(path="the-verdict.txt"):
    """Build a token -> token-id vocabulary from the text file at `path`.

    The text is split on punctuation, "--", and whitespace; the unique
    tokens are sorted alphabetically, and the special tokens
    <|endoftext|> and <|unk|> are appended last, so they receive the two
    highest ids.

    Args:
        path: path to a UTF-8 text file (defaults to "the-verdict.txt"
            to stay compatible with existing callers).

    Returns:
        dict[str, int]: mapping from token to its integer token id.
    """
    with open(path, "r", encoding="utf-8") as f:
        raw_text = f.read()
    print("Total number of character:", len(raw_text))
    preprocessed = re.split(r'([,.:;?_!"()\']|--|\s)', raw_text)
    # drop empty strings and pure-whitespace pieces produced by the split
    preprocessed = [item for item in preprocessed if item.strip()]
    all_tokens = sorted(set(preprocessed))
    all_tokens.extend(["<|endoftext|>", "<|unk|>"])
    # token id = position in the sorted token list
    return {token: integer for integer, token in enumerate(all_tokens)}


class SimpleTokenizerV2(object):
    """Word-level tokenizer with an <|unk|> fallback for unknown tokens.

    Unlike SimpleTokenizerV1, tokens absent from the vocabulary are
    replaced by the special token "<|unk|>" instead of raising KeyError,
    so the vocabulary passed in must contain "<|unk|>".
    """

    def __init__(self, vocabulary):
        # vocabulary: dict mapping each unique token (str) to a unique
        # integer token id, built from an alphabetically sorted token list;
        # must include the special token "<|unk|>".
        self.str_to_int = vocabulary
        # inverse mapping so decode() can turn ids back into tokens
        self.int_to_str = {i: s for s, i in vocabulary.items()}

    def encode(self, text):
        """Split `text` into tokens and return their token ids.

        Unknown tokens are mapped to the id of "<|unk|>".
        """
        # BUGFIX: use the same split pattern the vocabulary was built with
        # (see make_vocabulary). The previous pattern omitted ':' and ';',
        # so e.g. "word:" stayed glued together, missed the vocabulary,
        # and was wrongly encoded as <|unk|>.
        preprocessed = re.split(r'([,.:;?_!"()\']|--|\s)', text)
        preprocessed = [item.strip() for item in preprocessed if item.strip()]
        # replace out-of-vocabulary tokens with <|unk|> to avoid KeyError
        preprocessed = [item if item in self.str_to_int else "<|unk|>" for item in preprocessed]
        ids = [self.str_to_int[s] for s in preprocessed]
        return ids

    def decode(self, ids):
        """Rebuild text from token ids, tightening space before punctuation."""
        text = " ".join([self.int_to_str[i] for i in ids])
        # drop the space that join() inserted before punctuation marks
        text = re.sub(r'\s+([,.?!"()\'])', r'\1', text)
        return text

# --- demo: encode/decode a two-document string with SimpleTokenizerV2 ---
text1 = "Hello, do you like tea?"
text2 = "In the sunlit terraces of the palace."
# BUGFIX: join the two texts with the real document-separator token.
# The original used "<|endoftext4|>" (typo), which is not in the
# vocabulary and therefore encoded as <|unk|> instead of the intended
# <|endoftext|> id shown in the expected outputs below.
text = " <|endoftext|> ".join((text1, text2))
print(text)

vocabulary = make_vocabulary()
tokenizer = SimpleTokenizerV2(vocabulary)

# expected: [1131, 5, 355, 1126, 628, 975, 10, 1130, 55, 988, 956, 984, 722, 988, 1131, 7]
print(tokenizer.encode(text))

# vocabulary doesn't contain the words 'Hello' and 'palace', so <|unk|> is used:
# <|unk|>, do you like tea? <|endoftext|> In the sunlit terraces of the <|unk|>.
print(tokenizer.decode(tokenizer.encode(text)))

