# Token vocabulary of the Simple language: reserved words, operators,
# and punctuation delimiters used by the lexer below.
keywords = set("if else while for int float return".split())
operators = set("+ - * / = == != < <= > >= && ||".split())
delimiters = set("; ( ) { } , :".split())


# DFA transition table: maps state -> {input character -> next state}.
#
# Character classes used to build the table.  Note the `+ 1`: the original
# ranges `range(ord('a'), ord('z'))` were off by one and silently excluded
# 'z' and 'Z' from identifiers.
_letters = (
    {chr(c) for c in range(ord('a'), ord('z') + 1)}
    | {chr(c) for c in range(ord('A'), ord('Z') + 1)}
)
_digits = {str(d) for d in range(10)}

dfa = {
    'start': {
        '_': 'identifier_start',
        **{ch: 'identifier_start' for ch in _letters},
        **{d: 'number_start' for d in _digits},
        '=': 'equal',
        '!': 'not_equal',
        '<': 'less_than',
        '>': 'greater_than',
        '&': 'and',
        '|': 'or',
        '+': 'operator_plus',
        '-': 'operator_minus',
        '*': 'operator_multiply',
        '/': 'operator_divide',
        ';': 'delimiter_semicolon',
        ',': 'delimiter_comma',
        '(': 'delimiter_open_paren',
        ')': 'delimiter_close_paren',
        '{': 'delimiter_open_brace',
        '}': 'delimiter_close_brace',
        ':': 'delimiter_colon'
    },
    # Identifiers: letter/underscore, then letters/digits/underscores.
    'identifier_start': {
        '_': 'identifier_body',
        **{ch: 'identifier_body' for ch in _letters},
        **{d: 'identifier_body' for d in _digits}
    },
    'identifier_body': {
        '_': 'identifier_body',
        **{ch: 'identifier_body' for ch in _letters},
        **{d: 'identifier_body' for d in _digits}
    },
    # Numbers: integer part, optional fraction, optional exponent.  A '.'
    # must be followed by a digit (the non-accepting 'decimal_point' state
    # enforces this), so "1." alone is not a valid float literal.
    'number_start': {
        **{d: 'number_body' for d in _digits},
        '.': 'decimal_point',  # was 'fractional_part': inconsistently accepted "1."
        'e': 'exponent_sign',
        'E': 'exponent_sign'   # was missing: "1E5" failed after a single digit
    },
    'number_body': {
        **{d: 'number_body' for d in _digits},
        '.': 'decimal_point',
        'e': 'exponent_sign',
        'E': 'exponent_sign'
    },
    'decimal_point': {
        **{d: 'fractional_part' for d in _digits}
    },
    'fractional_part': {
        **{d: 'fractional_part' for d in _digits},
        'e': 'exponent_sign',
        'E': 'exponent_sign'
    },
    'exponent_sign': {
        '+': 'exponent_number',
        '-': 'exponent_number',
        **{d: 'exponent_number' for d in _digits}
    },
    'exponent_number': {
        **{d: 'exponent_number' for d in _digits}
    },
    # Two-character operator continuations.
    'equal': {'=': 'equal_equal'},
    'not_equal': {'=': 'not_equal_equal'},
    'less_than': {'=': 'less_than_equal'},
    'greater_than': {'=': 'greater_than_equal'},
    'and': {'&': 'and_and'},
    'or': {'|': 'or_or'}
}

# Accepting DFA states mapped to handlers that turn the matched lexeme
# into a (token_type, token_value) pair.


def _classify_word(value):
    # Reserved words win over plain identifiers.
    return ('keyword', value) if value in keywords else ('identifier', value)


def _fixed(token_type, lexeme):
    # Build a handler that ignores the matched text and emits a constant token.
    return lambda _value: (token_type, lexeme)


accept_states = {
    'identifier_start': _classify_word,
    'identifier_body': _classify_word,
    'number_start': lambda value: ('number', int(value)),
    'number_body': lambda value: ('number', int(value)),
    'fractional_part': lambda value: ('number', float(value)),
    'exponent_number': lambda value: ('number', float(value)),
}

# Operator states and the lexeme each one stands for.
for _state, _op in [
    ('equal', '='), ('equal_equal', '=='), ('not_equal_equal', '!='),
    ('less_than', '<'), ('less_than_equal', '<='),
    ('greater_than', '>'), ('greater_than_equal', '>='),
    ('and', '&'), ('and_and', '&&'), ('or', '|'), ('or_or', '||'),
    ('operator_plus', '+'), ('operator_minus', '-'),
    ('operator_multiply', '*'), ('operator_divide', '/'),
]:
    accept_states[_state] = _fixed('operator', _op)

# Delimiter states and their punctuation characters.
for _state, _delim in [
    ('delimiter_semicolon', ';'), ('delimiter_comma', ','),
    ('delimiter_open_paren', '('), ('delimiter_close_paren', ')'),
    ('delimiter_open_brace', '{'), ('delimiter_close_brace', '}'),
    ('delimiter_colon', ':'),
]:
    accept_states[_state] = _fixed('delimiter', _delim)
def tokenize(code):
    """Scan *code* and return a list of (type, value, line, col) tuples.

    Drives the module-level ``dfa`` table with maximal munch: the longest
    prefix ending in an accepting state wins, and the scanner rolls back
    to that point before continuing.  Lexical errors are reported to
    stdout and skipped; scanning then resumes at the next character.
    A tab advances the column counter by 4 (fixed tab width).
    """
    tokens = []
    line_no = 1
    col_no = 1
    i = 0
    length = len(code)

    while i < length:
        char = code[i]

        # Skip whitespace.  (Comment handling is not implemented.)
        if char.isspace():
            if char == '\n':
                line_no += 1
                col_no = 1
            elif char == '\t':
                col_no += 4
            else:
                col_no += 1
            i += 1
            continue

        # Run the DFA from 'start', remembering the most recent accepting
        # position/coordinates so we can roll back to them afterwards.
        state = 'start'
        start_pos = i
        start_line = line_no
        start_col = col_no
        last_accept_state = None
        last_accept_pos = i
        last_accept_line = line_no
        last_accept_col = col_no

        while i < length and state in dfa:
            char = code[i]
            next_state = dfa[state].get(char)
            if next_state is None:
                break
            if char == '\n':
                line_no += 1
                col_no = 1
            elif char == '\t':
                col_no += 4
            else:
                col_no += 1
            i += 1
            state = next_state
            if state in accept_states:
                last_accept_state = state
                last_accept_pos = i
                last_accept_line = line_no
                last_accept_col = col_no

        if last_accept_state:
            # Roll back to the last accepting position: characters consumed
            # past it (e.g. the dangling 'e' in "12e;") are re-scanned as
            # the start of the next token instead of being silently lost.
            i = last_accept_pos
            line_no = last_accept_line
            col_no = last_accept_col
            lexeme = code[start_pos:last_accept_pos]
            token_type, token_value = accept_states[last_accept_state](lexeme)
            tokens.append((token_type, token_value, start_line, start_col))
        elif i > start_pos:
            # Consumed characters but never reached an accepting state:
            # report the bad run.  It is already skipped, so do NOT advance
            # i again (the old code dropped the character after the error).
            print(f"Error at Line {start_line}, Column {start_col}: Invalid token '{code[start_pos:i]}'")
        else:
            # No transition at all from 'start': unknown character.
            print(f"Unexpected character at Line {line_no}, Column {col_no}: '{char}'")
            col_no += 1  # keep the column in sync with the skipped character
            i += 1

    return tokens

def main():
    """CLI entry point: tokenize a source file and dump tokens.

    Reads the file named on the command line, prints each token as
    "(type, value)", and mirrors the same lines into
    "<input-stem>_tokens.txt" next to the input.
    """
    import sys

    if len(sys.argv) < 2:
        print("Usage: python lexer.py <input_file>")
        return

    input_file = sys.argv[1]
    # Replace the final extension (if any) with the _tokens.txt suffix.
    output_file = input_file.rsplit('.', 1)[0] + "_tokens.txt"

    try:
        with open(input_file, 'r', encoding='utf-8') as src:
            source_text = src.read()
    except FileNotFoundError:
        print(f"File not found: {input_file}")
        return

    token_stream = tokenize(source_text)

    with open(output_file, 'w', encoding='utf-8') as out:
        for token_type, token_value, _line, _col in token_stream:
            rendered = f"({token_type}, {token_value})"
            print(rendered)
            out.write(rendered + "\n")

    print(f"\nTokens written to file: {output_file}")


if __name__ == '__main__':
    main()
