#include <cctype>
#include <iostream>
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

using namespace std;

// Token类型枚举
// Lexical categories produced by the PL/0 lexer.
enum TokenType {
    KEYWORD,    // reserved word (const, var, begin, ...)
    IDENTIFIER, // user-defined name: letter followed by letters/digits/underscores
    NUMBER,     // unsigned integer literal
    OPERATOR,   // arithmetic, relational, or assignment operator
    DELIMITER,  // punctuation / delimiter characters
    EOF_TOKEN   // sentinel returned once the input is exhausted
};

// Token结构体
struct Token {
    TokenType type;
    string value;
    Token(TokenType t, const string& v) : type(t), value(v) {}
};

// Hand-written lexer for PL/0. Feed it the whole program text, then call
// get_next_token() repeatedly until an EOF_TOKEN is returned.
class Lexer {
private:
    string source;      // full source text being scanned
    size_t pos;         // index of current_char within source
    char current_char;  // character under the cursor; '\0' once input is exhausted
    set<string> keywords = { // PL/0 reserved words
        "const", "var", "procedure", "begin", "end",
        "if", "then", "while", "do", "call", "read", "write", "odd"
    };

    // The <cctype> classifiers require an argument representable as unsigned
    // char; passing a plain (possibly signed) char is undefined behavior for
    // bytes >= 0x80 (e.g. UTF-8 input). These wrappers add the required cast.
    static bool is_space(char c) { return isspace(static_cast<unsigned char>(c)) != 0; }
    static bool is_alpha(char c) { return isalpha(static_cast<unsigned char>(c)) != 0; }
    static bool is_digit(char c) { return isdigit(static_cast<unsigned char>(c)) != 0; }
    static bool is_alnum(char c) { return isalnum(static_cast<unsigned char>(c)) != 0; }

    // Advance the cursor one character; current_char becomes '\0' at end of input.
    void advance() {
        pos++;
        if (pos < source.length()) {
            current_char = source[pos];
        } else {
            current_char = '\0'; // end-of-input sentinel
        }
    }

    // Consume a run of whitespace characters.
    void skip_whitespace() {
        while (current_char != '\0' && is_space(current_char)) {
            advance();
        }
    }

    // Scan letter(letter|digit|'_')* and classify it as KEYWORD or IDENTIFIER.
    Token parse_identifier_or_keyword() {
        string result;
        while (current_char != '\0' && (is_alnum(current_char) || current_char == '_')) {
            result += current_char;
            advance();
        }
        if (keywords.find(result) != keywords.end()) {
            return Token(KEYWORD, result);
        }
        return Token(IDENTIFIER, result);
    }

    // Scan an unsigned integer literal.
    Token parse_number() {
        string result;
        while (current_char != '\0' && is_digit(current_char)) {
            result += current_char;
            advance();
        }
        return Token(NUMBER, result);
    }

    // Scan an operator that may be one or two characters long
    // (>=, <=, !=, :=, <> are the two-character forms).
    Token parse_operator() {
        string op(1, current_char);
        advance();
        if (current_char != '\0') {
            string double_op = op + current_char;
            if (double_op == ">=" || double_op == "<=" ||
                double_op == "!=" || double_op == ":=" ||
                double_op == "<>") {
                advance();
                return Token(OPERATOR, double_op);
            }
        }
        return Token(OPERATOR, op);
    }

public:
    // Construct a lexer over `code`; the cursor starts on the first character.
    explicit Lexer(const string& code) : source(code), pos(0) {
        if (!source.empty()) {
            current_char = source[0];
        } else {
            current_char = '\0';
        }
    }

    // Return the next token, or Token(EOF_TOKEN, "") once input is exhausted.
    // Throws runtime_error on a character that cannot start any token.
    Token get_next_token() {
        while (current_char != '\0') {
            if (is_space(current_char)) {
                skip_whitespace();
                continue;
            }
            if (is_alpha(current_char)) {
                return parse_identifier_or_keyword();
            }
            if (is_digit(current_char)) {
                return parse_number();
            }

            switch (current_char) {
                // Operators that may begin a two-character form.
                case ':': case '<': case '>': case '!':
                    return parse_operator();
                // Single-character operators. Note: '=' and '#' are PL/0's
                // equality / inequality operators, so they belong to OPERATOR,
                // not DELIMITER (the previous version misfiled them).
                case '+': case '-': case '*': case '/':
                case '=': case '#': {
                    string value(1, current_char);
                    advance();
                    return Token(OPERATOR, value);
                }
                // Pure punctuation.
                case '(': case ')': case ',': case ';': case '.': {
                    string value(1, current_char);
                    advance();
                    return Token(DELIMITER, value);
                }
                default:
                    throw runtime_error("非法字符: " + string(1, current_char));
            }
        }
        return Token(EOF_TOKEN, "");
    }
};

// 测试代码
int main() {
    string source_code = R"(
        const a=10;
        var b;
        begin
            b := a + 5;
            write(b);
        end.
    )";

    Lexer lexer(source_code);
    vector<Token> tokens;
    try {
        while (true) {
            Token token = lexer.get_next_token();
            if (token.type == EOF_TOKEN) break;
            tokens.push_back(token);
        }
    } catch (const runtime_error& e) {
        cerr << "词法错误: " << e.what() << endl;
        return 1;
    }

    cout << "词法分析结果：" << endl;
    for (const auto& token : tokens) {
        string type_str;
        switch (token.type) {
            case KEYWORD:    type_str = "KEYWORD"; break;
            case IDENTIFIER: type_str = "IDENTIFIER"; break;
            case NUMBER:     type_str = "NUMBER"; break;
            case OPERATOR:   type_str = "OPERATOR"; break;
            case DELIMITER: type_str = "DELIMITER"; break;
            default:        type_str = "UNKNOWN";
        }
        cout << type_str << ": " << token.value << endl;
    }
    return 0;
}