#coding=utf8

from scanner import Scanner
from ala_classes import TokenType as T, Token, AlaGeneralLexicalError


class PythonScanner(Scanner):
    """Lexer for a Python-like toy language.

    Produces a flat token stream in which indentation changes are made
    explicit as TAB_INC / TAB_DEC tokens, so the parser does not have to
    track indentation itself.  Consecutive blank lines are collapsed and
    the stream always ends with NEW_LINE followed by EOI.
    """

    # Alphabetic keywords; each is matched with __check_for_word, i.e.
    # only at a word boundary.  Order preserved from the original cascade.
    _KEYWORDS = (
        ("if", T.IF), ("elif", T.ELIF), ("else", T.ELSE),
        ("while", T.WHILE), ("for", T.FOR), ("in", T.IN),
        ("def", T.DEF), ("True", T.TRUE), ("False", T.FALSE),
        ("and", T.AND), ("or", T.OR), ("not", T.NOT),
        ("pass", T.PASS), ("return", T.RETURN), ("print", T.PRINT),
    )

    # Two-character operators must be tried before one-character ones,
    # otherwise "==" would be scanned as two ASSIGN tokens.
    _TWO_CHAR_OPS = (
        ("==", T.EQUAL), ("!=", T.NOT_EQUAL),
        ("<=", T.LESS_E), (">=", T.MORE_E),
        ("+=", T.ASSIGN_PLUS), ("-=", T.ASSIGN_MINUS),
        ("*=", T.ASSIGN_MULT), ("/=", T.ASSIGN_DIV),
    )

    _ONE_CHAR_OPS = (
        ("<", T.LESS), (">", T.MORE), (":", T.COLON), ("=", T.ASSIGN),
        ("+", T.PLUS), ("-", T.MINUS), ("*", T.MULT), ("/", T.DIV),
        ("(", T.BR_L), (")", T.BR_R), ("[", T.BR_Q_L), ("]", T.BR_Q_R),
        (",", T.COMMA),
    )

    def get_all_tokens(self):
        """Scan the whole input and return the list of tokens.

        Raw TAB tokens are consumed here and re-emitted as TAB_INC /
        TAB_DEC tokens whenever the indentation level of a line differs
        from that of the previous line.  Runs of NEW_LINE tokens are
        collapsed into one.  At end of input a final NEW_LINE (plus any
        pending dedents) is emitted before the EOI token.

        Raises AlaGeneralLexicalError on any lexical problem.
        """
        try:
            token = Token(T.NONE)
            tabs = 0                 # indentation of the line being scanned
            prev_tabs = 0            # indentation of the previous line
            line_is_started = False  # True once indent tokens were emitted
            current_line = 1
            while token.type != T.EOI:
                prev_token = token
                token = self.get_next_token(current_line)
                if token.type == T.TAB:
                    # Leading tabs are only counted; they are folded into
                    # TAB_INC / TAB_DEC tokens below.
                    tabs += 1
                    continue
                if token.type == T.EOI:
                    # Terminate the last line explicitly so the parser
                    # always sees NEW_LINE before EOI; pending dedents (if
                    # any) are emitted by the indentation code below, and
                    # the EOI token itself is appended there as well.
                    line_is_started = False
                    prev_tabs = tabs
                    tabs = 0
                    self.tokens.append(Token(T.NEW_LINE, token_line=current_line))
                if token.type == T.NEW_LINE:
                    line_is_started = False
                    current_line += 1
                    prev_tabs = tabs
                    tabs = 0
                    # Collapse runs of blank lines into a single NEW_LINE.
                    if prev_token.type != T.NEW_LINE:
                        self.tokens.append(token)
                else:
                    if tabs != prev_tabs and not line_is_started:
                        # First real token of the line: emit one TAB_INC or
                        # TAB_DEC per level of indentation change.
                        if tabs > prev_tabs:
                            for _ in xrange(tabs - prev_tabs):
                                self.tokens.append(Token(T.TAB_INC, token_line=current_line))
                        else:
                            for _ in xrange(prev_tabs - tabs):
                                self.tokens.append(Token(T.TAB_DEC, token_line=current_line))
                        line_is_started = True
                    self.tokens.append(token)
            return self.tokens
        except AlaGeneralLexicalError:
            # Re-raise as is, preserving the original traceback.
            raise
        except Exception:
            raise AlaGeneralLexicalError(u"Неизвестная лексическая ошибка")

    @staticmethod
    def __is_allowed_near_id(ch):
        """Return True if ch may directly border an identifier or an
        alphabetic keyword (whitespace, brackets, operators, etc.)."""
        return (PythonScanner.is_whitespace(ch) or
                ch in {"(", ")", "[", "]", "<", ">", "=", "+", "-", "*",
                       "/", ",", ":"})

    @staticmethod
    def __is_letter(ch):
        """Return True if ch is a single Latin letter (either case) or '_'."""
        ch_l = ch.lower()
        # The length guard keeps multi-character slices out, matching the
        # behaviour of the original exhaustive comparison chain.
        return len(ch_l) == 1 and ("a" <= ch_l <= "z" or ch_l == "_")

    @staticmethod
    def __is_allowed_not_first_in_id(ch):
        """Return True if ch may appear in an identifier after the first
        character.  '_' is already accepted by __is_letter; '-' is
        additionally allowed by this language's identifier rules."""
        return PythonScanner.__is_letter(ch) or ch == "-"

    @staticmethod
    def __is_starting_new_command(ch):
        """Return True if ch may precede the start of a new command."""
        return (ch in {"{", "}", ";", "(", "["} or
                PythonScanner.is_whitespace(ch))

    def __check_for_fragment(self, fragment):
        """Return True if the text at the caret starts with fragment."""
        i = self.caret_position
        return self.__get_text_section(i, i + len(fragment)) == fragment

    def __check_for_word(self, word):
        """Return True if word occurs at the caret as a whole word: the
        preceding character may start a new command and the following
        character may legally border an identifier."""
        i = self.caret_position
        return (PythonScanner.__is_starting_new_command(self.__get_text_section(i - 1)) and
                self.__check_for_fragment(word) and
                PythonScanner.__is_allowed_near_id(self.__get_text_section(i + len(word))))

    def __get_text_section(self, i1, i2=None):
        """IndexError-safe access to self.text.

        __get_text_section(i)         == self.text[i]    (u" " if out of range)
        __get_text_section(i1, i2)    == self.text[i1:i2]
        __get_text_section(i, 'end')  == self.text[i:]
        """
        if i1 >= len(self.text) or i1 < 0:
            # Out-of-range reads behave like a space so that boundary
            # checks work at the very start and end of the text.
            return u" "
        if i2 is None:
            return self.text[i1]
        if i2 == "end":
            return self.text[i1:]
        return self.text[i1:i2]

    def find_token(self):
        """Return the token that starts at the caret position.

        The caret is not advanced here; the caller is expected to move it
        by the length of the returned token's text.  Input that matches
        nothing yields an ERROR token.
        """
        i = self.caret_position

        # End of input: nothing but trailing whitespace remains.
        if self.__get_text_section(i, "end").rstrip() == "":
            return Token(T.EOI)
        # Indentation units: four spaces or a hard tab.
        if self.__check_for_fragment("    "):
            return Token(T.TAB, "    ")
        if self.__check_for_fragment("\t"):
            return Token(T.TAB, "\t")
        if self.__get_text_section(i) == "\n":
            return Token(T.NEW_LINE, "\n")
        if self.is_whitespace(self.__get_text_section(i)):
            return Token(T.WHITESPACE, self.__get_text_section(i))

        # Alphabetic keywords (whole-word match only).
        for word, token_type in PythonScanner._KEYWORDS:
            if self.__check_for_word(word):
                return Token(token_type, word)

        # Operators: two-character ones first (see _TWO_CHAR_OPS note).
        for op, token_type in PythonScanner._TWO_CHAR_OPS:
            if self.__get_text_section(i, i + 2) == op:
                return Token(token_type, op)
        for op, token_type in PythonScanner._ONE_CHAR_OPS:
            if self.__get_text_section(i) == op:
                return Token(token_type, op)

        # Comment: from '#' up to (not including) the end of the line.
        if self.__get_text_section(i) == "#":
            i1 = i
            while i1 < len(self.text) and self.__get_text_section(i1) != "\n":
                i1 += 1
            return Token(T.COMMENT, self.__get_text_section(i, i1))

        # String literal: single- or double-quoted, single line only.
        if self.__get_text_section(i) == '"' or self.__get_text_section(i) == "'":
            quote = self.__get_text_section(i)
            i1 = i + 1
            while i1 < len(self.text) and self.__get_text_section(i1) != quote:
                i1 += 1
                if i1 >= len(self.text) or self.__get_text_section(i1) == "\n":
                    # Unterminated string: end of text or newline reached.
                    return Token(T.ERROR, self.__get_text_section(i, i1))
            return Token(T.VAL_STR, self.__get_text_section(i, i1 + 1))

        # Identifier: a letter followed by letters, digits, '_' or '-'.
        if self.__is_letter(self.__get_text_section(i)):
            i1 = 0
            while (self.__is_letter(self.__get_text_section(i + i1)) or
                   self.__get_text_section(i + i1).isnumeric() or
                   self.__is_allowed_not_first_in_id(self.__get_text_section(i + i1))):
                i1 += 1
            ident = self.__get_text_section(i, i + i1)
            if not self.__is_allowed_near_id(self.__get_text_section(i - 1)):
                return Token(T.ERROR, ident)
            if self.__is_allowed_near_id(self.__get_text_section(i + i1)):
                return Token(T.ID, ident)
            # NOTE(review): when the character after the identifier is not
            # allowed, control deliberately falls through to the generic
            # single-character ERROR at the bottom (original behaviour).

        # Integer literal: digits bounded by identifier-border characters.
        if self.__is_allowed_near_id(self.__get_text_section(i - 1)):
            if self.__get_text_section(i).isnumeric():
                i1 = 1
                while self.__get_text_section(i + i1).isnumeric():
                    i1 += 1
                value = self.__get_text_section(i, i + i1)
                if self.__is_allowed_near_id(self.__get_text_section(i + i1)):
                    return Token(T.VAL_INT, value)
                return Token(T.ERROR, value)

        return Token(T.ERROR, self.__get_text_section(i))