/* picoc lexer - converts source text into a tokenised form */
#include "lex.h"
#include "table.h"  
#include "variable.h"
#include <math.h>
#include <ctype.h>
#include "platform.h"
#include "parse.h"
#define isCidstart(c) (isalpha(c) || (c)=='_' || (c)=='#')
#define isCident(c) (isalnum(c) || (c)=='_')

#define IS_HEX_ALPHA_DIGIT(c) (((c) >= 'a' && (c) <= 'f') || ((c) >= 'A' && (c) <= 'F'))
#define IS_BASE_DIGIT(c,b) (((c) >= '0' && (c) < '0' + (((b)<10)?(b):10)) || (((b) > 10) ? IS_HEX_ALPHA_DIGIT(c) : false))
#define GET_BASE_DIGIT(c) (((c) <= '9') ? ((c) - '0') : (((c) <= 'F') ? ((c) - 'A' + 10) : ((c) - 'a' + 10)))

#define NEXTIS(c,x,y) { if (nextChar == (c)) { LEXER_INC(lexer); gotToken = (x); } else gotToken = (y); }
#define NEXTIS3(c,x,d,y,z) { if (nextChar == (c)) { LEXER_INC(lexer); gotToken = (x); } else NEXTIS(d,y,z) }
#define NEXTIS4(c,x,d,y,e,z,a) { if (nextChar == (c)) { LEXER_INC(lexer); gotToken = (x); } else NEXTIS3(d,y,e,z,a) }
#define NEXTIS3PLUS(c,x,d,y,e,z,a) { if (nextChar == (c)) { LEXER_INC(lexer); gotToken = (x); } else if (nextChar == (d)) { if (lexer->pos[1] == (e)) { LEXER_INCN(lexer, 2); gotToken = (z); } else { LEXER_INC(lexer); gotToken = (y); } } else gotToken = (a); }
#define NEXTISEXACTLY3(c,d,y,z) { if (nextChar == (c) && lexer->pos[1] == (d)) { LEXER_INCN(lexer, 2); gotToken = (y); } else gotToken = (z); }

#define LEXER_INC(l) ( (l)->pos++, (l)->characterPos++ )
#define LEXER_INCN(l, n) ( (l)->pos+=(n), (l)->characterPos+=(n) )
#define TOKEN_DATA_OFFSET (2)

/* maximum value which can be represented by a "char" data type */
#define MAX_CHAR_VALUE (255)




/* reserved words and pre-processor directives mapped to their tokens.
   loaded into reservedWordTable by LexInit() and looked up by
   LexCheckReservedWord() after each identifier is scanned.
   note: '#' counts as an identifier-start character (see isCidstart),
   so "#define" etc. are scanned as single words and matched here. */
const Lex::ReservedWord Lex::kReservedWords[] = {
    {"#define", kTokenHashDefine},
    {"#else", kTokenHashElse},
    {"#endif", kTokenHashEndif},
    {"#if", kTokenHashIf},
    {"#ifdef", kTokenHashIfdef},
    {"#ifndef", kTokenHashIfndef},
    {"#include", kTokenHashInclude},
    {"auto", kTokenAutoType},
    {"break", kTokenBreak},
    {"case", kTokenCase},
    {"char", kTokenCharType},
    {"continue", kTokenContinue},
    {"default", kTokenDefault},
    {"delete", kTokenDelete},
    {"do", kTokenDo},
    {"double", kTokenDoubleType},
    {"else", kTokenElse},
    {"enum", kTokenEnumType},
    {"extern", kTokenExternType},
    {"float", kTokenFloatType},
    {"for", kTokenFor},
    {"goto", kTokenGoto},
    {"if", kTokenIf},
    {"int", kTokenIntType},
    {"long", kTokenLongType},
    {"new", kTokenNew},
    {"register", kTokenRegisterType},
    {"return", kTokenReturn},
    {"short", kTokenShortType},
    {"signed", kTokenSignedType},
    {"sizeof", kTokenSizeof},
    {"static", kTokenStaticType},
    {"struct", kTokenStructType},
    {"switch", kTokenSwitch},
    {"typedef", kTokenTypedef},
    {"union", kTokenUnionType},
    {"unsigned", kTokenUnsignedType},
    {"void", kTokenVoidType},
    {"while", kTokenWhile}
};

/* default constructor - all lexer state lives in the Interpreter
   (see LexInit), so there is nothing to initialize here */
Lex::Lex() {
}

/* initialize the lexer */
/* initialize the lexer: build the reserved-word hash table and set up
   the shared token value used by the scanner.
   @param pc  interpreter whose tables and lexValue are initialized */
void Lex::LexInit(Interpreter *pc) {
    /* hoisted so the loop bound and table size agree; size_t avoids the
       signed/unsigned comparison the old `int i < sizeof(...)` loop had */
    const size_t reservedWordCount = sizeof(kReservedWords) / sizeof(ReservedWord);

    /* size the hash table at twice the entry count to keep chains short */
    MyTable::TableInitTable(&pc->reservedWordTable, &pc->reservedWordHashTable[0],
        reservedWordCount * 2, true);

    for (size_t i = 0; i < reservedWordCount; i++) {
        MyTable::TableSet(pc, &pc->reservedWordTable,
                          MyTable::TableStrRegister(pc, kReservedWords[i].word),
            (Value*)&kReservedWords[i], NULL, 0, 0);
    }

    /* lexValue is reused for every scanned token; it borrows lexAnyValue
       as backing storage and owns nothing on heap or stack */
    pc->lexValue.typ = NULL;
    pc->lexValue.val = &pc->lexAnyValue;
    pc->lexValue.lValueFrom = false;
    pc->lexValue.valOnHeap = false;
    pc->lexValue.valOnStack = false;
    pc->lexValue.anyValOnHeap = false;
    pc->lexValue.isLValue = false;
}

/* deallocate */
/* deallocate lexer state: clear any interactive input lines and remove
   the reserved words from the lookup table.
   @param pc  interpreter to clean up */
void Lex::LexCleanup(Interpreter *pc) {
    LexInteractiveClear(pc, NULL);

    /* size_t loop index avoids the signed/unsigned comparison with sizeof */
    const size_t reservedWordCount = sizeof(kReservedWords) / sizeof(ReservedWord);
    for (size_t i = 0; i < reservedWordCount; i++)
        MyTable::TableDelete(pc, &pc->reservedWordTable,
                             MyTable::TableStrRegister(pc, kReservedWords[i].word));
}

/* check if a word is a reserved word - used while scanning */
/* look up a scanned word in the reserved-word table.
   returns the keyword/directive token, or kTokenNone when the word is
   an ordinary identifier */
LexToken Lex::LexCheckReservedWord(Interpreter *pc, const char *word) {
    Value *entry = NULL;

    const bool isReserved =
        MyTable::TableGet(&pc->reservedWordTable, word, &entry, NULL, NULL, NULL);

    return isReserved ? ((ReservedWord*)entry)->token : kTokenNone;
}

/* get a numeric literal - used while scanning */
/* get a numeric literal - used while scanning.
   handles decimal, octal (leading 0), hex (0x/0X) and binary (0b/0B)
   integers, plus floating point constants with an optional fraction,
   exponent and 'f' suffix. stores the result in *value and returns
   kTokenIntegerConstant or kTokenFPConstant.
   fixes vs. the previous version: a duplicated end-of-input check was
   removed, and every suffix dereference is now guarded against reading
   past lexer->end. */
LexToken Lex::LexGetNumber(Interpreter *pc, LexState *lexer, Value *value) {
    long result = 0;
    long base = 10;
    double fPResult;
    double fPDiv;

    if (*lexer->pos == '0') {
        /* a binary, octal or hex literal */
        LEXER_INC(lexer);
        if (lexer->pos != lexer->end) {
            if (*lexer->pos == 'x' || *lexer->pos == 'X') {
                base = 16; LEXER_INC(lexer);
            } else if (*lexer->pos == 'b' || *lexer->pos == 'B') {
                base = 2; LEXER_INC(lexer);
            } else if (*lexer->pos != '.')
                base = 8;  /* "0.5" stays decimal; "0755" is octal */
        }
    }

    /* accumulate the integer part */
    for (; lexer->pos != lexer->end && IS_BASE_DIGIT(*lexer->pos, base);
            LEXER_INC(lexer))
        result = result * base + GET_BASE_DIGIT(*lexer->pos);

    /* skip (and ignore) unsigned/long suffixes; end-guarded so a literal
       at the very end of the buffer can't cause an out-of-bounds read */
    if (lexer->pos != lexer->end && (*lexer->pos == 'u' || *lexer->pos == 'U'))
        LEXER_INC(lexer);
    if (lexer->pos != lexer->end && (*lexer->pos == 'l' || *lexer->pos == 'L'))
        LEXER_INC(lexer);

    value->typ = &pc->longType;
    value->val->longInteger = result;

    if (lexer->pos == lexer->end)
        return kTokenIntegerConstant;

    if (*lexer->pos != '.' && *lexer->pos != 'e' && *lexer->pos != 'E')
        return kTokenIntegerConstant;

    /* it's a floating point constant */
    value->typ = &pc->fPType;
    fPResult = (double)result;

    if (*lexer->pos == '.') {
        /* fractional part */
        LEXER_INC(lexer);
        for (fPDiv = 1.0/base; lexer->pos != lexer->end && IS_BASE_DIGIT(*lexer->pos, base);
                LEXER_INC(lexer), fPDiv /= (double)base) {
            fPResult += GET_BASE_DIGIT(*lexer->pos) * fPDiv;
        }
    }

    if (lexer->pos != lexer->end && (*lexer->pos == 'e' || *lexer->pos == 'E')) {
        /* exponent part, with an optional leading '-' */
        int exponentSign = 1;

        LEXER_INC(lexer);
        if (lexer->pos != lexer->end && *lexer->pos == '-') {
            exponentSign = -1;
            LEXER_INC(lexer);
        }

        result = 0;
        while (lexer->pos != lexer->end && IS_BASE_DIGIT(*lexer->pos, base)) {
            result = result * base + GET_BASE_DIGIT(*lexer->pos);
            LEXER_INC(lexer);
        }

        fPResult *= pow((double)base, (double)result * exponentSign);
    }

    value->val->fP = fPResult;

    /* skip an optional 'f' suffix - also end-guarded */
    if (lexer->pos != lexer->end && (*lexer->pos == 'f' || *lexer->pos == 'F'))
        LEXER_INC(lexer);

    return kTokenFPConstant;
}

/* get a reserved word or identifier - used while scanning */
/* get a reserved word or identifier - used while scanning.
   the word text is interned so identical identifiers share one pointer */
LexToken Lex::LexGetWord(Interpreter *pc, LexState *lexer, Value *value) {
    const char *wordStart = lexer->pos;

    /* consume all identifier characters */
    do {
        LEXER_INC(lexer);
    } while (lexer->pos != lexer->end && isCident((int)*lexer->pos));

    value->typ = NULL;
    value->val->identifier =
        MyTable::TableStrRegister2(pc, wordStart, lexer->pos - wordStart);

    /* a directive keyword switches the lexer into the matching mode */
    LexToken token = LexCheckReservedWord(pc, value->val->identifier);
    if (token == kTokenHashInclude)
        lexer->mode = kLexModeHashInclude;
    else if (token == kTokenHashDefine)
        lexer->mode = kLexModeHashDefine;

    if (token != kTokenNone)
        return token;

    /* first identifier after "#define " - remember it so a following
       '(' can be recognized as a macro parameter list opener */
    if (lexer->mode == kLexModeHashDefineSpace)
        lexer->mode = kLexModeHashDefineSpaceIdent;

    return kTokenIdentifier;
}

/* unescape a character from an octal character constant */
/* unescape an octal or hex character escape sequence.
   firstChar is the (already consumed) first digit; up to two further
   digits are read from *from, which is advanced past them */
unsigned char Lex::LexUnEscapeCharacterConstant(const char **from,
    unsigned char firstChar, int base) {
    unsigned char total = GET_BASE_DIGIT(firstChar);
    int extraDigits = 0;

    while (IS_BASE_DIGIT(**from, base) && extraDigits < 2) {
        total = total * base + GET_BASE_DIGIT(**from);
        (*from)++;
        extraDigits++;
    }

    return total;
}

/* unescape a character from a string or character constant */
/* unescape a character from a string or character constant.
   advances *from past the character (and any escape sequence) and
   returns the decoded byte. end points one past the last valid char. */
unsigned char Lex::LexUnEscapeCharacter(const char **from, const char *end) {
    unsigned char thisChar;

    while (*from != end && **from == '\\' &&
            &(*from)[1] != end && (*from)[1] == '\n' )
        (*from) += 2;  /* skip escaped end of lines with LF line termination */

    while (*from != end && **from == '\\' &&
            &(*from)[1] != end &&
            &(*from)[2] != end && (*from)[1] == '\r' && (*from)[2] == '\n')
        (*from) += 3;  /* skip escaped end of lines with CR/LF line termination */

    if (*from == end)
        return '\\';  /* nothing left to read - yield a literal backslash */

    if (**from == '\\') {
        /* it's escaped */
        (*from)++;
        if (*from == end)
            return '\\';  /* trailing backslash at end of input */

        thisChar = *(*from)++;
        switch (thisChar) {
        case '\\':
            return '\\';
        case '\'':
            return '\'';
        case '"':
            return '"';
        case 'a':
            return '\a';
        case 'b':
            return '\b';
        case 'f':
            return '\f';
        case 'n':
            return '\n';
        case 'r':
            return '\r';
        case 't':
            return '\t';
        case 'v':
            return '\v';
        case '0':
        case '1':
        case '2':
        case '3':
            /* octal escape: up to two more octal digits may follow */
            return LexUnEscapeCharacterConstant(from, thisChar, 8);
        case 'x':
            /* hex escape: up to two hex digits follow the 'x' */
            return LexUnEscapeCharacterConstant(from, '0', 16);
        default:
            /* unknown escapes yield the escaped character itself */
            return thisChar;
        }
    }
    else
        return *(*from)++;  /* a plain, unescaped character */
}

/* get a string constant - used while scanning */
/* get a string constant - used while scanning.
   scans to the unescaped endChar, unescapes the contents into a scratch
   stack buffer, then interns the result in the string table. interned
   literals are cached via the string-literal table so each distinct
   literal is stored only once. */
LexToken Lex::LexGetStringConstant(Interpreter *pc, LexState *lexer,
    Value *value, char endChar) {
    int escape = false;
    const char *startPos = lexer->pos;
    const char *endPos;
    char *escBuf;
    char *escBufPos;
    char *regString;
    Value *arrayValue;

    while (lexer->pos != lexer->end && (*lexer->pos != endChar || escape)) {
        /* find the end */
        if (escape) {
            if (*lexer->pos == '\r' && lexer->pos+1 != lexer->end)
                lexer->pos++;

            if (*lexer->pos == '\n' && lexer->pos+1 != lexer->end) {
                /* escaped newline: the literal continues on the next line */
                lexer->line++;
                lexer->pos++;
                lexer->characterPos = 0;
                lexer->emitExtraNewlines++;
            }

            escape = false;
        } else if (*lexer->pos == '\\')
            escape = true;

        LEXER_INC(lexer);
    }
    endPos = lexer->pos;

    escBuf = (char *)Interpreter::HeapAllocStack(endPos - startPos);
    if (escBuf == NULL)
        Platform::LexFail(pc, lexer, "(LexGetStringConstant) out of memory");

    /* second pass: unescape into the scratch buffer */
    for (escBufPos = escBuf, lexer->pos = startPos; lexer->pos != endPos;)
        *escBufPos++ = LexUnEscapeCharacter(&lexer->pos, endPos);

    /* try to find an existing copy of this string literal */
    regString = MyTable::TableStrRegister2(pc, escBuf, escBufPos - escBuf);
    Interpreter::HeapPopStack(escBuf, endPos - startPos);
    arrayValue = Variable::VariableStringLiteralGet(pc, regString);
    if (arrayValue == NULL) {
        /* create and store this string literal.
           BUG FIX: this call was previously the no-op comma expression
           "(pc, regString, arrayValue);" so the literal was never stored
           and every occurrence re-allocated a fresh value */
        arrayValue = Variable::VariableAllocValueAndData(pc, NULL, 0, false, NULL, true);
        arrayValue->typ = pc->charArrayType;
        arrayValue->val = (union AnyValue *)regString;
        Variable::VariableStringLiteralDefine(pc, regString, arrayValue);
    }

    /* create the pointer for this char* */
    value->typ = pc->charPtrType;
    value->val->pointer = regString;
    /* consume the closing quote if the literal was properly terminated */
    if (lexer->pos != lexer->end && *lexer->pos == endChar)
        LEXER_INC(lexer);

    return kTokenStringConstant;
}

/* get a character constant - used while scanning */
/* get a character constant - used while scanning.
   the opening quote has already been consumed; unescape one character
   and require a closing quote.
   BUG FIX: the old check skipped the error when the input ended right
   after the character, then incremented the position past the buffer. */
LexToken Lex::LexGetCharacterConstant(Interpreter *pc, LexState *lexer,
    Value *value) {
    value->typ = &pc->charType;
    value->val->character = LexUnEscapeCharacter(&lexer->pos, lexer->end);

    /* a missing closing quote - including at end of input - is an error */
    if (lexer->pos == lexer->end || *lexer->pos != '\'')
        Platform::LexFail(pc, lexer, "expected \"'\"");

    LEXER_INC(lexer);
    return kTokenCharacterConstant;
}

/* skip a comment - used while scanning */
/* skip a comment - used while scanning.
   nextChar selects between a block comment ('*') and a line comment.
   we enter with lexer->pos just past the opening two characters.
   BUG FIX: the terminator scan could match the opening "/*"'s own '*',
   so "/" "*" "/" was treated as a complete comment; we now require at
   least one character of comment body before matching "*" "/". */
void Lex::LexSkipComment(LexState *lexer, char nextChar) {
    if (nextChar == '*') {
        /* conventional C comment - scan for the terminating "*" "/" */
        const char *bodyStart = lexer->pos;
        while (lexer->pos != lexer->end &&
                (lexer->pos == bodyStart ||
                 *(lexer->pos-1) != '*' || *lexer->pos != '/')) {
            if (*lexer->pos == '\n')
                lexer->emitExtraNewlines++;  /* keep line numbers in sync */
            LEXER_INC(lexer);
        }

        if (lexer->pos != lexer->end)
            LEXER_INC(lexer);  /* step past the closing '/' */

        lexer->mode = kLexModeNormal;
    } else {
        /* C++ style comment - runs to the end of the line */
        while (lexer->pos != lexer->end && *lexer->pos != '\n')
            LEXER_INC(lexer);
    }
}

/* skip a line continuation - used while scanning */
/* skip the remainder of the current line after a continuation marker.
   nextChar is unused; the parameter is kept for signature parity with
   LexSkipComment */
void Lex::LexSkipLineCont(LexState *lexer, char nextChar) {
    (void)nextChar;
    for (; lexer->pos != lexer->end && *lexer->pos != '\n'; )
        LEXER_INC(lexer);
}

/* get a single token from the source - used while scanning */
/* get a single token from the source - used while scanning.
   returns the token type; *value points at pc->lexValue when the token
   carries a payload (identifier, numeric/char/string constant). */
LexToken Lex::LexScanGetToken(Interpreter *pc, LexState *lexer,
    Value **value) {
    char thisChar;
    char nextChar;
    LexToken gotToken = kTokenNone;

    /* handle cases like multi-line comments or string constants
        which mess up the line count */
    if (lexer->emitExtraNewlines > 0) {
        lexer->emitExtraNewlines--;
        return kTokenEndOfLine;
    }

    /* scan for a token */
    do {
        *value = &pc->lexValue;
        /* skip whitespace, tracking line numbers and #define modes */
        while (lexer->pos != lexer->end && isspace((int)*lexer->pos)) {
            if (*lexer->pos == '\n') {
                lexer->line++;
                lexer->pos++;
                lexer->mode = kLexModeNormal;
                lexer->characterPos = 0;
                return kTokenEndOfLine;
            } else if (lexer->mode == kLexModeHashDefine ||
                                    lexer->mode == kLexModeHashDefineSpace)
                lexer->mode = kLexModeHashDefineSpace;
            else if (lexer->mode == kLexModeHashDefineSpaceIdent)
                lexer->mode = kLexModeNormal;

            LEXER_INC(lexer);
        }

        if (lexer->pos == lexer->end || *lexer->pos == '\0')
            return kTokenEOF;

        thisChar = *lexer->pos;
        /* identifiers, keywords and directives ('#' counts as an
           identifier-start character - see isCidstart) */
        if (isCidstart((int)thisChar))
            return LexGetWord(pc, lexer, *value);

        if (isdigit((int)thisChar))
            return LexGetNumber(pc, lexer, *value);

        /* operators/punctuation: peek one character ahead so the NEXTIS*
           macros can match multi-character operators */
        nextChar = (lexer->pos+1 != lexer->end) ? *(lexer->pos+1) : 0;
        LEXER_INC(lexer);
        switch (thisChar) {
        case '"':
            gotToken = LexGetStringConstant(pc, lexer, *value, '"');
            break;
        case '\'':
            gotToken = LexGetCharacterConstant(pc, lexer, *value);
            break;
        case '(':
            /* '(' immediately after a #define'd name opens a macro
               parameter list, not an expression */
            if (lexer->mode == kLexModeHashDefineSpaceIdent)
                gotToken = kTokenOpenMacroBracket;
            else
                gotToken = kTokenOpenBracket;
            lexer->mode = kLexModeNormal;
            break;
        case ')':
            gotToken = kTokenCloseBracket;
            break;
        case '=':
            NEXTIS('=', kTokenEqual, kTokenAssign);
            break;
        case '+':
            NEXTIS3('=', kTokenAddAssign, '+', kTokenIncrement, kTokenPlus);
            break;
        case '-':
            NEXTIS4('=', kTokenSubtractAssign, '>', kTokenArrow, '-',
                kTokenDecrement, kTokenMinus);
            break;
        case '*':
            NEXTIS('=', kTokenMultiplyAssign, kTokenAsterisk); break;
        case '/':
            /* comments are consumed here and produce no token */
            if (nextChar == '/' || nextChar == '*') {
                LEXER_INC(lexer);
                LexSkipComment(lexer, nextChar);
            } else
                NEXTIS('=', kTokenDivideAssign, kTokenSlash);
            break;
        case '%':
            NEXTIS('=', kTokenModulusAssign, kTokenModulus); break;
        case '<':
            /* inside a #include, '<' opens a <header> pseudo-string */
            if (lexer->mode == kLexModeHashInclude)
                gotToken = LexGetStringConstant(pc, lexer, *value, '>');
            else {
                NEXTIS3PLUS('=', kTokenLessEqual, '<', kTokenShiftLeft, '=',
                    kTokenShiftLeftAssign, kTokenLessThan);
            }
            break;
        case '>':
            NEXTIS3PLUS('=', kTokenGreaterEqual, '>', kTokenShiftRight, '=',
                kTokenShiftRightAssign, kTokenGreaterThan);
            break;
        case ';':
            gotToken = kTokenSemicolon;
            break;
        case '&':
            NEXTIS3('=', kTokenArithmeticAndAssign, '&', kTokenLogicalAnd,
                kTokenAmpersand);
            break;
        case '|':
            NEXTIS3('=', kTokenArithmeticOrAssign, '|', kTokenLogicalOr,
                kTokenArithmeticOr);
            break;
        case '{':
            gotToken = kTokenLeftBrace;
            break;
        case '}':
            gotToken = kTokenRightBrace;
            break;
        case '[':
            gotToken = kTokenLeftSquareBracket;
            break;
        case ']':
            gotToken = kTokenRightSquareBracket;
            break;
        case '!':
            NEXTIS('=', kTokenNotEqual, kTokenUnaryNot);
            break;
        case '^':
            NEXTIS('=', kTokenArithmeticExorAssign, kTokenArithmeticExor);
            break;
        case '~':
            gotToken = kTokenUnaryExor;
            break;
        case ',':
            gotToken = kTokenComma;
            break;
        case '.':
            NEXTISEXACTLY3('.', '.', kTokenEllipsis, kTokenDot);
            break;
        case '?':
            gotToken = kTokenQuestionMark;
            break;
        case ':':
            gotToken = kTokenColon;
            break;
// XXX: line continuation feature
        case '\\':
            /* a backslash followed by whitespace continues the line */
            if (nextChar == ' ' || nextChar == '\n') {
                LEXER_INC(lexer);
                LexSkipLineCont(lexer, nextChar);
            } else
                Platform::LexFail(pc, lexer, "illegal character '%c'", thisChar);
            break;
        default:
            Platform::LexFail(pc, lexer, "illegal character '%c'", thisChar);
            break;
        }
    } while (gotToken == kTokenNone);  /* loop if we only consumed a comment */

    return gotToken;
}

/* what size value goes with each token */
/* number of payload bytes stored in the token stream after each token */
int Lex::LexTokenSize(LexToken token) {
    switch (token) {
    case kTokenIdentifier:
    case kTokenStringConstant:
        return sizeof(char*);          /* interned string pointer */
    case kTokenIntegerConstant:
        return sizeof(long);
    case kTokenCharacterConstant:
        return sizeof(unsigned char);
    case kTokenFPConstant:
        return sizeof(double);
    default:
        return 0;                      /* most tokens carry no payload */
    }
}

/* produce tokens from the lexer and return a heap buffer with
    the result - used for scanning */
/* produce tokens from the lexer and return a heap buffer with
    the result - used for scanning.
    stream layout: [token byte][character-position byte][optional payload
    of LexTokenSize(token) bytes], repeated, terminated by kTokenEOF.
    if tokenLen is non-NULL it receives the buffer's byte length. */
void *Lex::LexTokenize(Interpreter *pc, LexState *lexer, int *tokenLen) {
    int memUsed = 0;
    int valueSize;
    int lastCharacterPos = 0;
    /* worst-case size estimate; the assert below checks it held */
    int reserveSpace = (lexer->end - lexer->pos) * 4 + 16;
    void *heapMem;
    void *tokenSpace = Interpreter::HeapAllocStack(reserveSpace);
    LexToken token;
    Value *gotValue;
    char *tokenPos = (char*)tokenSpace;

    if (tokenSpace == NULL)
        Platform::LexFail(pc, lexer, "(LexTokenize tokenSpace == NULL) out of memory");

    do {
        /* store the token at the end of the stack area */
        token = LexScanGetToken(pc, lexer, &gotValue);

#ifdef DEBUG_LEXER
        printf("token: %02x\n", token);
#endif
        *(unsigned char*)tokenPos = token;
        tokenPos++;
        memUsed++;

        /* column position recorded for error reporting */
        *(unsigned char*)tokenPos = (unsigned char)lastCharacterPos;
        tokenPos++;
        memUsed++;

        valueSize = LexTokenSize(token);
        if (valueSize > 0) {
            /* store a value as well */
            memcpy((void*)tokenPos, (void*)gotValue->val, valueSize);
            tokenPos += valueSize;
            memUsed += valueSize;
        }

        lastCharacterPos = lexer->characterPos;

    } while (token != kTokenEOF);

    /* copy the tokens from the scratch stack area to their final heap home */
    heapMem = Interpreter::HeapAllocMem(memUsed);
    if (heapMem == NULL)
        Platform::LexFail(pc, lexer, "(LexTokenize heapMem == NULL) out of memory");

    assert(reserveSpace >= memUsed);
    memcpy(heapMem, tokenSpace, memUsed);
    Interpreter::HeapPopStack(tokenSpace, reserveSpace);
#ifdef DEBUG_LEXER
    {
        int i;
        printf("tokens: ");
        for (i = 0; i < memUsed; i++)
            printf("%02x ", *((unsigned char*)heapMem+i));
        printf("\n");
    }
#endif
    if (tokenLen)
        *tokenLen = memUsed;

    return heapMem;
}

/* lexically analyse some source text */
/* lexically analyse a complete source buffer.
   returns a heap-allocated token buffer (see LexTokenize for layout) */
void *Lex::LexAnalyse(Interpreter *pc, const char *fileName, const char *source,
    int sourceLen, int *tokenLen) {
    LexState state;

    /* position the scanner at the start of the buffer */
    state.pos = source;
    state.end = source + sourceLen;
    state.sourceText = source;
    state.fileName = fileName;
    state.line = 1;
    state.characterPos = 1;
    state.mode = kLexModeNormal;
    state.emitExtraNewlines = 0;

    return LexTokenize(pc, &state, tokenLen);
}

/* prepare to parse a pre-tokenised buffer */
/* prepare a ParseState to walk a pre-tokenised buffer */
void Lex::LexInitParser(Interpreter *pc, ParseState *parser, const char *sourceText,
    void *tokenSource, char *fileName, int runIt, int enableDebugger) {
    parser->pc = pc;
    parser->pos = (const unsigned char *)tokenSource;
    parser->sourceText = sourceText;
    parser->fileName = fileName;
    parser->line = 1;
    parser->characterPos = 0;
    /* execute the code or merely skim over it, per the caller's intent */
    parser->mode = runIt ? kRunModeRun : kRunModeSkip;
    parser->searchLabel = 0;
    parser->hashIfLevel = 0;
    parser->hashIfEvaluateToLevel = 0;
    parser->debugMode = enableDebugger;
}

/* get the next token, without pre-processing */
/* get the next token, without pre-processing.
   reads from the pre-tokenised buffer, transparently fetching new lines
   of interactive input when the current buffer is exhausted. when value
   is non-NULL, a payload-carrying token is unpacked into pc->lexValue.
   incPos: true to consume the token, false to merely peek at it. */
LexToken Lex::LexGetRawToken(ParseState *parser, Value **value,
    int incPos) {
    int valueSize;
    char *prompt = NULL;
    LexToken token = kTokenNone;
    Interpreter *pc = parser->pc;

    do {
        /* get the next token */
        if (parser->pos == NULL && pc->interactiveHead != NULL)
            parser->pos = pc->interactiveHead->tokens;

        if (parser->fileName != pc->strEmpty || pc->interactiveHead != NULL) {
            /* skip leading newlines */
            while ((token = (LexToken)*(unsigned char*)parser->pos) == kTokenEndOfLine) {
                parser->line++;
                parser->pos += TOKEN_DATA_OFFSET;
            }
        }

        if (parser->fileName == pc->strEmpty &&
                (pc->interactiveHead == NULL || token == kTokenEOF)) {
            /* we're at the end of an interactive input token list */
            char LineBuffer[LINEBUFFER_MAX];
            void *LineTokens;
            int LineBytes;
            TokenLine *LineNode;

            if (pc->interactiveHead == NULL ||
                    (unsigned char*)parser->pos ==
                    &pc->interactiveTail->tokens[pc->interactiveTail->numBytes-TOKEN_DATA_OFFSET]) {
                /* get interactive input */
                if (pc->lexUseStatementPrompt) {
                    prompt = INTERACTIVE_PROMPT_STATEMENT;
                    pc->lexUseStatementPrompt = false;
                } else
                    prompt = INTERACTIVE_PROMPT_LINE;

                if (Platform::PlatformGetLine(&LineBuffer[0], LINEBUFFER_MAX, prompt) == NULL)
                    return kTokenEOF;

                /* put the new line at the end of the linked list of interactive lines.
                   BUG FIX: the allocation was previously the comma expression
                   "(TokenLine *)(pc, parser, sizeof(TokenLine), true)", which
                   set LineNode to the garbage pointer (TokenLine*)1 instead of
                   allocating a node */
                LineTokens = LexAnalyse(pc, pc->strEmpty, &LineBuffer[0],
                    strlen(LineBuffer), &LineBytes);
                LineNode = (TokenLine *)Variable::VariableAlloc(pc, parser,
                    sizeof(TokenLine), true);
                LineNode->tokens = (unsigned char *)LineTokens;
                LineNode->numBytes = LineBytes;
                if (pc->interactiveHead == NULL) {
                    /* start a new list */
                    pc->interactiveHead = LineNode;
                    parser->line = 1;
                    parser->characterPos = 0;
                } else
                    pc->interactiveTail->next = LineNode;

                pc->interactiveTail = LineNode;
                pc->interactiveCurrentLine = LineNode;
                parser->pos = (const unsigned char *)LineTokens;
            } else {
                /* go to the next token line */
                if (parser->pos != &pc->interactiveCurrentLine->tokens[pc->interactiveCurrentLine->numBytes-TOKEN_DATA_OFFSET]) {
                    /* scan for the line our position points into */
                    for (pc->interactiveCurrentLine = pc->interactiveHead;
                            parser->pos != &pc->interactiveCurrentLine->tokens[pc->interactiveCurrentLine->numBytes-TOKEN_DATA_OFFSET];
                            pc->interactiveCurrentLine = pc->interactiveCurrentLine->next) {
                        assert(pc->interactiveCurrentLine->next != NULL);
                    }
                }

                assert(pc->interactiveCurrentLine != NULL);
                pc->interactiveCurrentLine = pc->interactiveCurrentLine->next;
                assert(pc->interactiveCurrentLine != NULL);
                parser->pos = pc->interactiveCurrentLine->tokens;
            }

            token = (LexToken)*(unsigned char*)parser->pos;
        }
    } while ((parser->fileName == pc->strEmpty && token == kTokenEOF) ||
        token == kTokenEndOfLine);

    /* byte after the token is the column it was scanned at */
    parser->characterPos = *((unsigned char*)parser->pos + 1);
    valueSize = LexTokenSize(token);
    if (valueSize > 0) {
        /* this token requires a value - unpack it */
        if (value != NULL) {
            switch (token) {
            case kTokenStringConstant:
                pc->lexValue.typ = pc->charPtrType;
                break;
            case kTokenIdentifier:
                pc->lexValue.typ = NULL;
                break;
            case kTokenIntegerConstant:
                pc->lexValue.typ = &pc->longType;
                break;
            case kTokenCharacterConstant:
                pc->lexValue.typ = &pc->charType;
                break;
            case kTokenFPConstant:
                pc->lexValue.typ = &pc->fPType;
                break;
            default:
                break;
            }

            memcpy((void*)pc->lexValue.val,
                (void*)((char*)parser->pos+TOKEN_DATA_OFFSET), valueSize);
            pc->lexValue.valOnHeap = false;
            pc->lexValue.valOnStack = false;
            pc->lexValue.isLValue = false;
            pc->lexValue.lValueFrom = NULL;
            *value = &pc->lexValue;
        }

        if (incPos)
            parser->pos += valueSize + TOKEN_DATA_OFFSET;
    } else {
        /* EOF is sticky: never advance past it */
        if (incPos && token != kTokenEOF)
            parser->pos += TOKEN_DATA_OFFSET;
    }

#ifdef DEBUG_LEXER
    printf("Got token=%02x inc=%d pos=%d\n", token, incPos, parser->characterPos);
#endif
    assert(token >= kTokenNone && token <= kTokenEndOfFunction);
    return token;
}

/* correct the token position depending if we already incremented the position */
/* after a pre-processor directive token: if the caller only peeked at
   it (incPos false), consume it now so directive handling can proceed */
void Lex::LexHashIncPos(ParseState *parser, int incPos) {
    if (incPos)
        return;

    LexGetRawToken(parser, NULL, true);
}

/* handle a #ifdef directive */
/* handle a #ifdef (ifNot false) or #ifndef (ifNot true) directive */
void Lex::LexHashIfdef(ParseState *parser, int ifNot) {
    Value *identValue;
    Value *foundValue;

    /* the directive operand must be a bare identifier */
    if (LexGetRawToken(parser, &identValue, true) != kTokenIdentifier)
        Platform::ProgramFail(parser, "identifier expected");

    /* is the identifier defined in the global table? */
    int isDefined = MyTable::TableGet(&parser->pc->globalTable,
        identValue->val->identifier, &foundValue, NULL, NULL, NULL);

    /* activate this conditional level when the test succeeds and the
       enclosing conditionals are themselves active */
    bool conditionHolds = ifNot ? !isDefined : (isDefined != 0);
    if (parser->hashIfEvaluateToLevel == parser->hashIfLevel && conditionHolds)
        parser->hashIfEvaluateToLevel++;

    parser->hashIfLevel++;
}

/* handle a #if directive */
/* handle a #if directive: the operand is either a numeric constant or
   a macro name whose body supplies the constant */
void Lex::LexHashIf(ParseState *parser) {
    Value *operandValue;
    LexToken token = LexGetRawToken(parser, &operandValue, true);

    if (token == kTokenIdentifier) {
        /* resolve the macro and re-lex its body to obtain the constant */
        Value *macroValue = NULL;
        if (!MyTable::TableGet(&parser->pc->globalTable,
                operandValue->val->identifier, &macroValue, NULL, NULL, NULL))
            Platform::ProgramFail(parser, "'%s' is undefined",
                operandValue->val->identifier);

        if (macroValue->typ->base != kTypeMacro)
            Platform::ProgramFail(parser, "value expected");

        ParseState macroParser;
        Parse::ParserCopy(&macroParser, &macroValue->val->MacroDef.body);
        token = LexGetRawToken(&macroParser, &operandValue, true);
    }

    if (token != kTokenCharacterConstant && token != kTokenIntegerConstant)
        Platform::ProgramFail(parser, "value expected");

    /* a non-zero constant activates this conditional level, provided
       the enclosing conditionals are themselves active */
    if (parser->hashIfEvaluateToLevel == parser->hashIfLevel &&
            operandValue->val->character)
        parser->hashIfEvaluateToLevel++;

    parser->hashIfLevel++;
}

/* handle a #else directive */
/* handle a #else directive by flipping the active state of the
   innermost conditional level */
void Lex::LexHashElse(ParseState *parser) {
    if (parser->hashIfEvaluateToLevel == parser->hashIfLevel - 1) {
        /* the #if branch was inactive - activate the #else branch */
        parser->hashIfEvaluateToLevel++;
    } else if (parser->hashIfEvaluateToLevel == parser->hashIfLevel) {
        /* the #if branch was active - deactivate for the #else branch */
        if (parser->hashIfLevel == 0)
            Platform::ProgramFail(parser, "#else without #if");

        parser->hashIfEvaluateToLevel--;
    }
}

/* handle a #endif directive */
/* handle a #endif directive, popping one conditional nesting level */
void Lex::LexHashEndif(ParseState *parser) {
    if (parser->hashIfLevel == 0)
        Platform::ProgramFail(parser, "#endif without #if");

    parser->hashIfLevel--;

    /* clamp the active level back down to the new nesting depth */
    if (parser->hashIfEvaluateToLevel > parser->hashIfLevel)
        parser->hashIfEvaluateToLevel = parser->hashIfLevel;
}

#if 0 /* useful for debug */
/* debug helper: print a token's symbolic name (compiled out by the
   surrounding #if 0). the table index must match the LexToken enum
   ordering exactly.
   FIX: string literals may not initialize a non-const char* array in
   C++11 and later - the table is now const (and static, so it isn't
   rebuilt on every call if this is ever enabled). */
void Lex::LexPrintToken(LexToken token) {
    static const char* tokenNames[] = {
        /* 0x00 */  "None",
        /* 0x01 */  "Comma",
        /* 0x02 */  "Assign",
                    "AddAssign",
                    "SubtractAssign",
                    "MultiplyAssign",
                    "DivideAssign",
                    "ModulusAssign",
        /* 0x08 */  "ShiftLeftAssign",
                    "ShiftRightAssign",
                    "ArithmeticAndAssign",
                    "ArithmeticOrAssign",
                    "ArithmeticExorAssign",
        /* 0x0d */  "QuestionMark",
                    "Colon",
        /* 0x0f */  "LogicalOr",
        /* 0x10 */  "LogicalAnd",
        /* 0x11 */  "ArithmeticOr",
        /* 0x12 */  "ArithmeticExor",
        /* 0x13 */  "Ampersand",
        /* 0x14 */  "Equal",
                    "NotEqual",
        /* 0x16 */  "LessThan",
                    "GreaterThan",
                    "LessEqual",
                    "GreaterEqual",
        /* 0x1a */  "ShiftLeft",
                    "ShiftRight",
        /* 0x1c */  "Plus",
                    "Minus",
        /* 0x1e */  "Asterisk",
                    "Slash",
                    "Modulus",
        /* 0x21 */  "Increment",
                    "Decrement",
                    "UnaryNot",
                    "UnaryExor",
                    "sizeNeed",
                    "Cast",
        /* 0x27 */  "LeftSquareBracket",
                    "RightSquareBracket",
                    "Dot",
                    "Arrow",
        /* 0x2b */  "OpenBracket",
                    "CloseBracket",
        /* 0x2d */  "identifier",
                    "IntegerConstant",
                    "FPConstant",
                    "StringConstant",
                    "CharacterConstant",
        /* 0x32 */  "Semicolon",
                    "Ellipsis",
        /* 0x34 */  "LeftBrace",
                    "RightBrace",
        /* 0x36 */  "intType",
                    "charType",
                    "FloatType",
                    "DoubleType",
                    "voidType",
                    "enumType",
        /* 0x3c */  "longType",
                    "SignedType",
                    "shortType",
                    "StaticType",
                    "AutoType",
                    "RegisterType",
                    "ExternType",
                    "StructType",
                    "UnionType",
                    "UnsignedType",
                    "Typedef",
        /* 0x46 */  "Continue",
                    "Do",
                    "Else",
                    "For",
                    "Goto",
                    "If",
                    "While",
                    "Break",
                    "Switch",
                    "Case",
                    "Default",
                    "Return",
        /* 0x52 */
                    "HashDefine",
                    "HashInclude",
                    "HashIf",
                    "HashIfdef",
                    "HashIfndef",
                    "HashElse",
                    "HashEndif",
        /* 0x59 */  "New",
                    "Delete",
        /* 0x5b */  "OpenMacroBracket",
        /* 0x5c */  "EOF",
                    "EndOfLine",
                    "EndOfFunction"
    };
    printf("{%s}", tokenNames[token]);
}
#endif

/* get the next token given a parser state, pre-processing as we go */
/* get the next token given a parser state, pre-processing as we go */
LexToken Lex::LexGetToken(ParseState *parser, Value **value,
    int incPos) {
    int skipToNext;
    LexToken token;

    /* loop until a token survives the pre-processor's #if filtering */
    do {
        bool handledPreProc = true;

        token = LexGetRawToken(parser, value, incPos);
        if (token == kTokenHashIfdef) {
            LexHashIncPos(parser, incPos);
            LexHashIfdef(parser, false);
        } else if (token == kTokenHashIfndef) {
            LexHashIncPos(parser, incPos);
            LexHashIfdef(parser, true);
        } else if (token == kTokenHashIf) {
            LexHashIncPos(parser, incPos);
            LexHashIf(parser);
        } else if (token == kTokenHashElse) {
            LexHashIncPos(parser, incPos);
            LexHashElse(parser);
        } else if (token == kTokenHashEndif) {
            LexHashIncPos(parser, incPos);
            LexHashEndif(parser);
        } else {
            handledPreProc = false;
        }

        /* reject the token if it was a pre-processor directive, or if we
            are inside a false #if region; if the caller asked us not to
            advance, step past the rejected token explicitly */
        skipToNext = (parser->hashIfEvaluateToLevel < parser->hashIfLevel &&
                token != kTokenEOF) || handledPreProc;
        if (!incPos && skipToNext)
            LexGetRawToken(parser, NULL, true);

    } while (skipToNext);

    return token;
}

/* take a quick peek at the next token, skipping any pre-processing */
LexToken Lex::LexRawPeekToken(ParseState *parser) {
    /* the token stream stores one token id per byte at parser->pos */
    const unsigned char *tokenByte = (const unsigned char*)parser->pos;
    return (LexToken)*tokenByte;
}

/* find the end of the line (i.e. the end of a macro definition),
    honouring backslash line continuations */
void Lex::LexToEndOfMacro(ParseState *parser) {
    /* a backslash token flags that the next end-of-line does not
        terminate the macro (line continuation) */
    bool continuation = false;
    for (;;) {
        LexToken token = (LexToken)*(unsigned char*)parser->pos;
        if (token == kTokenEOF)
            return;
        if (token == kTokenEndOfLine) {
            if (!continuation)
                return;
            continuation = false;  /* continued line consumed - re-arm */
        }
        if (token == kTokenBackSlash)
            continuation = true;
        LexGetRawToken(parser, NULL, true);
    }
}

/* copy the tokens from startParser to endParser into new memory, removing
    TokenEOFs and terminate with a kTokenEndOfFunction */
void *Lex::LexCopyTokens(ParseState *startParser, ParseState *endParser) {
    int memSize = 0;
    int copySize;
    unsigned char *pos = (unsigned char*)startParser->pos;
    unsigned char *newTokens;
    unsigned char *newTokenPos;
    TokenLine *iLine;
    Interpreter *pc = startParser->pc;

    if (pc->interactiveHead == NULL) {
        /* non-interactive mode - copy the tokens */
        memSize = endParser->pos - startParser->pos;
        /* BUGFIX: the allocator call had been dropped here, leaving a bare
            comma expression "(pc, startParser, ...)" that cast 'true' to a
            pointer; restore the VariableAlloc call (declared in variable.h) */
        newTokens = (unsigned char *)Variable::VariableAlloc(pc, startParser, memSize + TOKEN_DATA_OFFSET, true);
        memcpy(newTokens, (void*)startParser->pos, memSize);
    } else {
        /* we're in interactive mode - add up line by line */
        for (pc->interactiveCurrentLine = pc->interactiveHead;
                pc->interactiveCurrentLine != NULL &&
                (pos < &pc->interactiveCurrentLine->tokens[0] ||
                    pos >= &pc->interactiveCurrentLine->tokens[pc->interactiveCurrentLine->numBytes]);
                pc->interactiveCurrentLine = pc->interactiveCurrentLine->next) {
        } /* find the line we just counted */

        if (endParser->pos >= startParser->pos &&
                endParser->pos < &pc->interactiveCurrentLine->tokens[pc->interactiveCurrentLine->numBytes]) {
            /* all on a single line */
            memSize = endParser->pos - startParser->pos;
            /* BUGFIX: restore the dropped VariableAlloc call (see above) */
            newTokens = (unsigned char *)Variable::VariableAlloc(pc, startParser, memSize + TOKEN_DATA_OFFSET, true);
            memcpy(newTokens, (void*)startParser->pos, memSize);
        } else {
            /* it's spread across multiple lines - total up the sizes first,
                skipping each line's per-line overhead bytes */
            memSize = &pc->interactiveCurrentLine->tokens[pc->interactiveCurrentLine->numBytes-TOKEN_DATA_OFFSET] - pos;

            for (iLine = pc->interactiveCurrentLine->next;
                    iLine != NULL &&
                    (endParser->pos < &iLine->tokens[0] || endParser->pos >= &iLine->tokens[iLine->numBytes]);
                    iLine = iLine->next)
                memSize += iLine->numBytes - TOKEN_DATA_OFFSET;

            assert(iLine != NULL);
            memSize += endParser->pos - &iLine->tokens[0];
            /* BUGFIX: restore the dropped VariableAlloc call (see above) */
            newTokens = (unsigned char *)Variable::VariableAlloc(pc, startParser, memSize + TOKEN_DATA_OFFSET, true);

            /* copy the head line, the whole middle lines, then the tail */
            copySize = &pc->interactiveCurrentLine->tokens[pc->interactiveCurrentLine->numBytes-TOKEN_DATA_OFFSET] - pos;
            memcpy(newTokens, pos, copySize);
            newTokenPos = newTokens + copySize;
            for (iLine = pc->interactiveCurrentLine->next; iLine != NULL &&
                    (endParser->pos < &iLine->tokens[0] || endParser->pos >= &iLine->tokens[iLine->numBytes]);
                    iLine = iLine->next) {
                memcpy(newTokenPos, &iLine->tokens[0], iLine->numBytes - TOKEN_DATA_OFFSET);
                newTokenPos += iLine->numBytes-TOKEN_DATA_OFFSET;
            }
            assert(iLine != NULL);
            memcpy(newTokenPos, &iLine->tokens[0], endParser->pos - &iLine->tokens[0]);
        }
    }

    /* terminate so the copied function body has a definite end marker */
    newTokens[memSize] = (unsigned char)kTokenEndOfFunction;

    return newTokens;
}

/* throw away ALL buffered interactive token lines and reset the
    interactive list; optionally detach the parser from the freed memory */
void Lex::LexInteractiveClear(Interpreter *pc, ParseState *parser) {
    TokenLine *line = pc->interactiveHead;

    /* walk the list, releasing each line's token buffer then the node */
    while (line != NULL) {
        TokenLine *following = line->next;
        Interpreter::HeapFreeMem(line->tokens);
        Interpreter::HeapFreeMem(line);
        line = following;
    }
    pc->interactiveHead = NULL;
    pc->interactiveTail = NULL;

    /* don't leave the parser pointing at freed memory */
    if (parser != NULL)
        parser->pos = NULL;
}

/* indicate that we've completed up to this point in the interactive
    input and free expired tokens */
void Lex::LexInteractiveCompleted(Interpreter *pc, ParseState *parser) {
    while (pc->interactiveHead != NULL) {
        TokenLine *head = pc->interactiveHead;
        bool parserInsideHead = (parser->pos >= &head->tokens[0] &&
                parser->pos < &head->tokens[head->numBytes]);

        /* stop as soon as the parser still points into the head line */
        if (parserInsideHead)
            break;

        /* this token line is no longer needed - free it */
        pc->interactiveHead = head->next;
        Interpreter::HeapFreeMem(head->tokens);
        Interpreter::HeapFreeMem(head);

        if (pc->interactiveHead == NULL) {
            /* we've emptied the list */
            parser->pos = NULL;
            pc->interactiveTail = NULL;
        }
    }
}

/* the next time we prompt, make it the full statement prompt */
void Lex::LexInteractiveStatementPrompt(Interpreter *pc) {
    /* flag read by the interactive input code when printing a prompt */
    pc->lexUseStatementPrompt = true;
}