# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name：     tokenizer
   Description : 分词器
   Author :       Flyoung
   date：          2023/3/5
-------------------------------------------------
   Change Activity:
                   2023/3/5:
-------------------------------------------------
"""
from compiler.common.utils import unique
from compiler.lexical.lib_utils import (
    is_digit,
    is_alpha,
    is_blank,
    is_notes,
    is_sign,
    is_empty_str,
    is_reserved_word,
    is_var_type_reserved_words,
    is_equal_sign,
    is_constant,
    contain_alpha,
    get_sign_encode,
)
from typing import List, AnyStr


class Tokenizer:
    """
    Tokenizer: splits source-code lines into lexical tokens and extracts
    user-defined variable names from them.
    """

    @staticmethod
    def parse_result(result: List[str]) -> None:
        """
        Print the tokenization result, one ``<token, category>`` pair per line.

        :param result: token list produced by :meth:`parse_lines`.
        :return: None (output goes to stdout).
        """
        for item in result:
            if is_sign(item):
                # Operators / punctuation print with their numeric encoding.
                print(f'<{item}, {get_sign_encode(item)}>')
            elif is_reserved_word(item):
                print(f'<{item}, reserved word>')
            else:
                print(f'<{item}, user defined>')

    @staticmethod
    def parse_line(line: str) -> List[str]:
        """
        Tokenize a single line of source text.

        Identifier/number characters are accumulated into a buffer; any other
        character flushes the buffer and is then classified: blanks are
        skipped, a two-character comment marker stops the scan, a
        two-character sign (e.g. ``==``) is emitted as one token, and anything
        else is emitted as a single-character token.

        :param line: raw line of source text.
        :return: list of tokens in order of appearance.
        """
        result: List[str] = []

        def emit(token: str) -> None:
            # Guard against empty tokens so the result stays clean.
            if not is_empty_str(token):
                result.append(token)

        buffer = ""
        index = 0
        length = len(line)
        while index < length:
            char = line[index]
            if is_digit(char) or is_alpha(char):
                # Still inside an identifier / number: keep accumulating.
                buffer += char
                index += 1
                continue
            # Current char ends any in-progress token: flush it.
            if buffer:
                emit(buffer)
                buffer = ""
            if is_blank(char):
                index += 1
                continue
            # Look ahead one character for two-character constructs.
            if index + 1 < length:
                pair = line[index:index + 2]
                if is_notes(pair):
                    # Rest of the line is a comment; stop scanning.
                    break
                if is_sign(pair):
                    # Two-character operator: consume both characters.
                    emit(pair)
                    index += 2
                    continue
            # Plain single-character symbol.
            emit(char)
            index += 1
        # BUGFIX: flush a trailing identifier/number token. The original
        # dropped it whenever the line did not end with a blank/newline
        # (e.g. the last line of a file without a trailing newline).
        if buffer:
            emit(buffer)

        return result

    @staticmethod
    def parse_lines(lines: List[str]) -> List[str]:
        """
        Tokenize multiple lines and concatenate the per-line token lists.

        :param lines: lines of source text.
        :return: flat list of tokens across all lines, in order.
        """
        return [token
                for line in lines
                for token in Tokenizer.parse_line(line)]

    @staticmethod
    def separate_variables_single_line(line: str) -> List[str]:
        """
        Extract variable names from a single line.

        A token is treated as a variable when it is declared (preceded by a
        variable-type reserved word and followed by an equal sign) or when it
        contains at least one letter while being neither a constant, a
        reserved word, a sign, nor a digit token.

        :param line: raw line of source text.
        :return: de-duplicated list of variable names (via ``unique``).
        """
        variables = []
        tokens = Tokenizer.parse_line(line)
        # Declarations: <type keyword> <name> <equal sign>.
        for idx in range(1, len(tokens) - 1):
            if is_var_type_reserved_words(tokens[idx - 1]) and is_equal_sign(tokens[idx + 1]):
                variables.append(tokens[idx])
        # Usages: alphabetic tokens that are not constants/keywords/signs/digits.
        for token in tokens:
            if contain_alpha(token) and not any((
                is_constant(token),
                is_reserved_word(token),
                is_sign(token),
                is_digit(token),
            )):
                variables.append(token)
        return unique(variables)

    @staticmethod
    def separate_variables(lines: List[str]) -> List[str]:
        """
        Extract variable names across all lines.

        :param lines: lines of source text.
        :return: de-duplicated list of variable names (via ``unique``).
        """
        variables = []
        for line in lines:
            variables.extend(Tokenizer.separate_variables_single_line(line))
        return unique(variables)
