import os


def readlines(filename, encodings=('utf-8', 'gbk')):
    """Read all lines of *filename*, trying each encoding in turn.

    Only the encodings listed are attempted (by default UTF-8 then GBK).

    :param filename: path of the text file to read.
    :param encodings: candidate encodings, tried in order.
    :return: list of lines (with trailing newlines) on success, or
             ``None`` if no candidate encoding can decode the file.
    """
    for encoding in encodings:
        try:
            # `with` guarantees the file handle is closed; the original
            # leaked an open handle on every call.
            with open(filename, encoding=encoding) as f:
                return f.readlines()
        except UnicodeDecodeError:
            pass  # wrong encoding for this file — try the next candidate
    return None


def isCharValue(c: str) -> bool:
    """Return True if *c* may appear inside an identifier (alnum or '_')."""
    if c == '_':
        return True
    return c.isalnum()


def get_tokens(line):
    """Split one line into tokens.

    Identifier characters (alphanumerics and '_') are accumulated into a
    single token; every non-whitespace punctuation character becomes a
    token of its own; whitespace only separates tokens.
    """
    tokens = []
    word = []  # characters of the identifier currently being built
    for ch in line:
        if ch.isalnum() or ch == '_':  # identifier character (isCharValue)
            word.append(ch)
            continue
        # punctuation or whitespace ends the pending identifier, if any
        if word:
            tokens.append(''.join(word))
            word = []
        if not ch.isspace():
            tokens.append(ch)
    # the line may end in the middle of an identifier — flush it
    if word:
        tokens.append(''.join(word))
    return tokens


def merge_by_quotes(tokens):
    """Merge the tokens between a matching pair of quotes into one token.

    Content between single or double quotes is treated as opaque: the
    quote characters and everything between them are concatenated into a
    single token (quotes have the highest priority).

    :param tokens: flat token list as produced by ``get_tokens``.
    :return: new token list with quoted spans collapsed.
    """
    new_tokens = []
    pending = []   # tokens accumulated since the opening quote
    quote = None   # the opening quote character, or None when outside quotes
    for t in tokens:
        if quote is None:  # outside any quoted span
            if t == '"' or t == "'":
                quote = t
                pending.append(t)
            else:
                new_tokens.append(t)
        elif t == quote:  # matching closing quote — emit the merged span
            pending.append(t)
            new_tokens.append("".join(pending))
            pending = []
            quote = None
        else:  # inside a quoted span
            pending.append(t)

    # Bug fix: an unterminated quote used to silently DROP every pending
    # token; emit the partial span instead of losing input.
    if pending:
        new_tokens.append("".join(pending))
    return new_tokens


def get_tokens_list(lines):
    """Tokenize every line, keeping quoted spans as single tokens.

    :param lines: iterable of input lines.
    :return: list of token lists, one per line.
    """
    return [merge_by_quotes(get_tokens(line)) for line in lines]


def find_token(tokens, key_tokens):
    """Return the first token of *tokens* that appears in *key_tokens*.

    :return: the matching token, or ``None`` when there is no match.
    """
    keys = set(key_tokens)
    for tok in tokens:
        if tok in keys:
            return tok
    return None


def valid_brace(tokens):
    """Return the net brace depth of *tokens*: (# of '{') - (# of '}').

    Works on any iterable of single tokens, including a plain string
    (which iterates character by character).
    """
    delta = {"{": 1, "}": -1}
    return sum(delta.get(t, 0) for t in tokens)


def split_record(line_list, keywords=("class", "def", "let", "defm", "multiclass", "foreach"),
                 one_line_keywords=("include",)):
    """Group the input lines into records.

    A record starts at a token found in *keywords* and ends either at a
    top-level ';' or when a '{...}' group closes (braces balanced).
    Lines whose first token is in *one_line_keywords* form a record on
    their own. Tokens seen outside any record are ignored (and printed).

    :param line_list: pre-cleaned input lines (no comments/blank lines).
    :return: list of records, each a flat token list.
    """
    record_list, merged_stmt = [], []
    in_record = False  # True while accumulating tokens of the current record
    braces = 0         # current '{'/'}' nesting depth of the record

    tokens_list = get_tokens_list(line_list)
    for tokens in tokens_list:
        if not tokens:  # bug fix: a whitespace-only line tokenizes to []
            continue
        if tokens[0] in one_line_keywords:  # e.g. include — one line, one record
            record_list.append(tokens)
            continue
        for token in tokens:
            if not in_record:
                if token in keywords:
                    in_record = True
                else:
                    print("ignore", token)
                    continue
            merged_stmt.append(token)
            # Compare the whole token, never its characters: a merged
            # quote token like '"{"' must NOT affect brace counting or
            # record termination (the original iterated its characters).
            if braces == 0 and token == ";":
                # ';'-terminated record complete
                record_list.append(merged_stmt)
                merged_stmt = []
                in_record = False  # bug fix: was never reset
            elif token == "{":
                braces += 1
            elif token == "}":
                braces -= 1
                if braces == 0:
                    # '{...}'-terminated record complete
                    record_list.append(merged_stmt)
                    merged_stmt = []
                    in_record = False  # bug fix: was never reset
    return record_list


def get_document_records(file_name, annotation_start="//",
                         record_start_keywords=("class", "def", "let", "defm", "multiclass", "foreach"),
                         record_one_line_keywords=("include",)):
    """Read *file_name* line by line and split its contents into records.

    Blank lines and comment lines (starting with *annotation_start*) are
    dropped; trailing comments are stripped from the remaining lines.

    :param file_name: path of the file to parse.
    :param annotation_start: line-comment marker (default ``//``).
    :return: list of records as produced by ``split_record`` (empty list
             when the file cannot be decoded).
    """
    lines = readlines(file_name)
    if lines is None:  # bug fix: readlines returns None on decode failure
        return []

    line_list = []
    for line in lines:
        nline = line.strip()  # also removes the trailing newline
        if not nline or nline.startswith(annotation_start):  # skip blanks/comments
            continue
        # Bug fix: the original tested `nline.find(...)`, but find()
        # returns -1 (truthy) when the marker is ABSENT — use `in`.
        if annotation_start in nline:
            nline = nline.partition(annotation_start)[0].rstrip()
        line_list.append(nline)

    return split_record(line_list, record_start_keywords, record_one_line_keywords)


def fmt_print(tokens: list, indent: str = '') -> str:
    """Render a (possibly nested) token list as indented text.

    A flat list of strings becomes one space-joined line; a list of
    lists is rendered recursively, each nesting level adding one space
    of indentation.

    :param tokens: flat token list or list of token lists.
    :param indent: indentation prefix for the current level.
    :return: formatted string.
    :raises TypeError: if the first element is neither str nor list
                       (the original silently returned None).
    """
    if not tokens:  # bug fix: empty input used to raise IndexError
        return indent
    if isinstance(tokens[0], str):
        # e.g. ["class", "ISA_MIPS1", "{"] -> "class ISA_MIPS1 {"
        return indent + " ".join(tokens)
    if isinstance(tokens[0], list):
        # e.g. [["class", "A", "{"], ["def", "B", ";"]] -> one line each
        return "\n".join(indent + fmt_print(t, indent + ' ') for t in tokens)
    raise TypeError("fmt_print expects a list of str or a list of lists")
