from exception import TypeJsonParseError


class TokenType:
    """Namespace of token-type constants produced by the Tokenizer.

    Each constant is a plain string tag; tokens carry one of these in
    their ``type`` field.  (The previous empty ``__init__`` was removed —
    this class is only a constants holder and needs no initializer.)
    """

    STRING = 'STRING'
    NUMBER = 'NUMBER'
    NULL = 'NULL'
    START_ARRAY = "START_ARRAY"
    END_ARRAY = "END_ARRAY"
    START_OBJ = "START_OBJ"
    END_OBJ = "END_OBJ"
    COMMA = "COMMA"
    COLON = "COLON"
    BOOLEAN = "BOOLEAN"
    END_DOC = "END_DOC"
    KEY = "KEY"
    START_SET = "START_SET"
    END_SET = "END_SET"
    VALID_NAME = "VALID_NAME"
    OPTIONAL = "OPTIONAL"
    START_GENERIC = "START_GENERIC"
    END_GENERIC = "END_GENERIC"


class Token:
    """A single lexical token: a type tag, its value, and where it began."""

    def __init__(self, typ, value, line_col):
        self.type = typ          # one of the TokenType string constants
        self.value = value       # token payload; may be None (e.g. null, END_DOC)
        self.line_col = line_col  # LineCol of the token's first character

    def __str__(self):
        # Same rendering as before: [type,value,line_col] via str() of each part.
        return '[%s,%s,%s]' % (self.type, self.value, self.line_col)


class LineCol:
    """A (line, column) position in the source, 1-based, for error reports."""

    def __init__(self, line, col):
        self.line = line
        self.col = col

    def __str__(self):
        # Renders as "(line,col)", identical to the previous concatenation.
        return '(%s,%s)' % (self.line, self.col)


class Tokenizer:
    """Hand-written lexer for the type-json language.

    Construct it with the whole source string, then call ``next()``
    repeatedly; each call returns one :class:`Token`.  Once the input is
    exhausted every further call yields an ``END_DOC`` token.  Line and
    column positions are tracked so errors can point at the source.

    Fixes relative to the previous revision:
      * ``use`` keyword matching called ``self.__is_splitor`` without
        parentheses (always truthy), so e.g. ``user`` lexed as ``use``.
      * identifier scanning tested ``__is_var_name()`` twice; the second
        test was clearly meant to be ``__is_number()`` (digits inside names).
      * error messages built with ``+ self.c`` crashed with TypeError when
        ``self.c`` was None at end of input; now guarded with ``str()``.
      * typo ``cannnot`` in the escape error message.
    """

    # Quote characters that may open (and must close) a string literal.
    STRING = ['\"', '\'']
    # Characters permitted in identifiers.
    VAR_NAME = "a b c d e f g h i j k l m n o p q r s t u v w x y z A B C D E F G H I J K L M N O P Q R S T " \
               "U V W X Y Z $ _".split(' ')
    NUMBER = "1 2 3 4 5 6 7 8 9 0".split(' ')
    # Whitespace skipped between tokens.
    SPACE = [" ", "\n", "\r", "\t"]
    # Single-character punctuation.
    SYMBOL = ["{", "}", "[", "]", "(", ")", ",", ":", "<", ">", "?"]
    ESCAPE = "\\"
    # Characters that may legally follow a backslash inside a string.
    AVAILABLE_ESCAPE = ["\"", "\\", "\'", "/", "b", "f", "n", "t", "r", "u"]

    HEX = "0 1 2 3 4 5 6 7 8 9 a b c d e f A B C D E F".split(' ')

    # Any character that terminates the token currently being scanned.
    SPLITOR = STRING + SPACE + SYMBOL

    def __init__(self, string):
        self.string = list(string)
        self.pos = 0  # index of the current character in self.string
        # Current character, or None once the input is exhausted.
        if len(string) != 0:
            self.c = string[0]
        else:
            self.c = None
        self.line = 1
        self.col = 1
        # Width of each finished line (line -> last column), so __unread()
        # can step backwards across a newline.
        self.col_recorder = {}

    def next(self):
        """Return the next Token; END_DOC once the input is consumed."""
        return self.__read_token()

    def __read_token(self):
        """Skip whitespace, then dispatch on the current character.

        Raises TypeJsonParseError on any character that cannot start a token.
        """
        if self.c is None:
            return Token(TokenType.END_DOC, None, self.__line_col())
        while self.__is_space():
            self.__read_char()
            if self.c is None:
                return Token(TokenType.END_DOC, None, self.__line_col())

        line_col = self.__line_col()
        # Single-character punctuation tokens, by dispatch table.  (Built
        # here rather than at class level so the module stays importable
        # independent of definition order.)
        punctuation = {
            '{': TokenType.START_OBJ,
            '}': TokenType.END_OBJ,
            '[': TokenType.START_ARRAY,
            ']': TokenType.END_ARRAY,
            '(': TokenType.START_SET,
            ')': TokenType.END_SET,
            '<': TokenType.START_GENERIC,
            '>': TokenType.END_GENERIC,
            ':': TokenType.COLON,
            ',': TokenType.COMMA,
            '?': TokenType.OPTIONAL,
        }
        if self.c in punctuation:
            symbol = self.c
            self.__read_char()
            return Token(punctuation[symbol], symbol, line_col)
        if self.c in Tokenizer.STRING:
            return self.__read_string()
        if self.__is_number() or self.c == '-':
            return self.__read_number()
        token = self.__try_key()
        if token:
            return token
        if self.__is_var_name():
            return self.__read_var_name()
        raise TypeJsonParseError('tokenizer', 'unknown character `' + self.c + '`', self.__line_col())

    def __line_col(self):
        """Snapshot of the current source position."""
        return LineCol(self.line, self.col)

    def __next_line(self):
        # Record the column reached on the finished line so that
        # __previous_line() can restore it while unreading.
        self.col_recorder[self.line] = self.col
        self.line += 1
        self.col = 0

    def __previous_line(self):
        self.line -= 1
        self.col = self.col_recorder[self.line]

    def __read_char(self):
        """Advance one character; sets self.c to None at end of input."""
        self.pos += 1
        self.col += 1
        if self.pos == len(self.string):
            self.c = None
            return
        self.c = self.string[self.pos]
        if self.c == '\r' or self.c == '\n':
            self.__next_line()

    def __unread(self, count):
        """Step back `count` characters, restoring line/column bookkeeping."""
        self.pos -= count
        self.c = self.string[self.pos]
        for _ in range(count):
            if self.col == 1:
                self.__previous_line()
            else:
                self.col -= 1

    def __is_space(self):
        return self.c in Tokenizer.SPACE

    def __is_splitor(self):
        # End of input also terminates a token.
        return self.c is None or self.c in Tokenizer.SPLITOR

    def __is_number(self):
        return self.c in Tokenizer.NUMBER

    def __is_var_name(self):
        return self.c in Tokenizer.VAR_NAME

    def __match_keyword(self, word, typ, value):
        """Try to consume `word` followed by a separator.

        The caller guarantees self.c == word[0] and that no character of the
        word has been consumed yet.  On any mismatch — or if the word is not
        followed by a splitor — the input position is fully rewound and None
        is returned; otherwise a Token(typ, value) at the word's start.
        """
        line_col = self.__line_col()
        consumed = 0
        for expected in word[1:]:
            self.__read_char()
            consumed += 1
            if self.c != expected:
                self.__unread(consumed)
                return None
        self.__read_char()  # move past the last character of the word
        consumed += 1
        if self.__is_splitor():
            return Token(typ, value, line_col)
        self.__unread(consumed)
        return None

    def __try_key(self):
        """Recognise keyword/literal words; None if the input is not one.

        Replaces the former nested if-pyramid; also fixes the `use` branch,
        which tested `self.__is_splitor` without calling it (always truthy).
        """
        keywords = (
            ('null', TokenType.NULL, None),
            ('use', TokenType.KEY, 'use'),
            ('def', TokenType.KEY, 'def'),
            ('true', TokenType.BOOLEAN, 'true'),
            ('false', TokenType.BOOLEAN, 'false'),
            ('module', TokenType.KEY, 'module'),
        )
        # First characters are pairwise distinct, so at most one word is tried.
        for word, typ, value in keywords:
            if self.c == word[0]:
                return self.__match_keyword(word, typ, value)
        return None

    def __read_number(self):
        """Read a JSON-style number: optional '-', integer, fraction, exponent."""
        line_col = self.__line_col()
        digits = []
        if self.c == '-':
            digits.append('-')
            self.__read_char()
        self.__number_init(digits)
        return Token(TokenType.NUMBER, ''.join(digits), line_col)

    def __number_init(self, r):
        """Integer part: either a lone leading 0, or [1-9][0-9]*."""
        if self.c == '0':
            r.append('0')
            self.__read_char()
            self.__number(r)
        elif self.__is_number():
            while self.__is_number():
                r.append(self.c)
                self.__read_char()
            self.__number(r)
        else:
            # str() guards against self.c being None at end of input.
            raise TypeJsonParseError('tokenizer', 'expecting digit, but got `' + str(self.c) + '` instead',
                                     self.__line_col())

    def __number(self, r):
        """Optional fraction and/or exponent following the integer part."""
        if self.c == '.':
            r.append('.')
            self.__read_char()
            self.__num_digit(r)
            if self.c == 'e' or self.c == 'E':
                r.append(self.c)
                self.__read_char()
                self.__num_exp(r)
        elif self.c == 'e' or self.c == 'E':
            r.append(self.c)
            self.__read_char()
            self.__num_exp(r)

    def __num_digit(self, r):
        """One or more digits; raises if none are present before a splitor."""
        is_first = True
        while True:
            if self.__is_number():
                r.append(self.c)
                self.__read_char()
            elif self.__is_splitor() or self.c == 'e' or self.c == 'E':
                if is_first:
                    raise TypeJsonParseError('tokenizer', 'expecting digit, but got `' + str(self.c) + '` instead',
                                             self.__line_col())
                return r
            else:
                raise TypeJsonParseError('tokenizer', 'expecting digit, but got `' + str(self.c) + '` instead',
                                         self.__line_col())
            is_first = False

    def __num_exp(self, r):
        """Exponent digits, with an optional leading sign."""
        if self.c == '-' or self.c == '+':
            r.append(self.c)
            self.__read_char()
        self.__num_digit(r)

    def __read_string(self):
        """Read a quoted string; escapes are kept verbatim (backslash + char).

        The closing quote must match the opening one; a raw newline or end
        of input inside the string is an error.
        """
        line_col = self.__line_col()
        starter = self.c  # opening quote character
        r = []
        while True:
            self.__read_char()
            if self.__is_escape():
                if self.c == 'u':
                    # \uXXXX: keep the escape and require exactly four hex digits.
                    r.append('\\')
                    r.append(self.c)
                    for _ in range(4):
                        self.__read_char()
                        if self.__is_hex():
                            r.append(self.c)
                        else:
                            raise TypeJsonParseError('tokenizer',
                                                     'expecting hex number, but got ' + str(self.c) + ' instead',
                                                     self.__line_col())
                else:
                    r.append('\\')
                    r.append(self.c)
            elif self.c == starter:
                self.__read_char()
                return Token(TokenType.STRING, ''.join(r), line_col)
            elif self.c == '\r' or self.c == '\n' or self.c is None:
                raise TypeJsonParseError('tokenizer', 'expecting `' + starter + '`, but got end of line instead',
                                         self.__line_col())
            else:
                r.append(self.c)

    def __is_hex(self):
        return self.c in Tokenizer.HEX

    def __is_escape(self):
        """If self.c starts an escape, consume the backslash and return True,
        leaving self.c at the escaped character; raise on unknown escapes."""
        if self.c == Tokenizer.ESCAPE:
            self.__read_char()
            if self.c in Tokenizer.AVAILABLE_ESCAPE:
                return True
            else:
                raise TypeJsonParseError('tokenizer', 'cannot escape `' + str(self.c) + '`', self.__line_col())
        else:
            return False

    def __read_var_name(self):
        """Read an identifier token (VALID_NAME).

        The first character must be in VAR_NAME (checked by the caller);
        subsequent characters may also be digits — the previous revision
        tested __is_var_name() twice, which was clearly a typo for
        __is_number().
        """
        r = [self.c]
        line_col = self.__line_col()
        while True:
            self.__read_char()
            if self.__is_var_name() or self.__is_number():
                r.append(self.c)
            elif self.__is_splitor():
                return Token(TokenType.VALID_NAME, ''.join(r), line_col)
            else:
                raise TypeJsonParseError('tokenizer', 'unexpected `' + self.c + '`', self.__line_col())