//////////////////////////////////////////////////////////////////////////////////
//                                                                              //
//  This file is part of the buola project (https://code.google.com/p/buola/).  //
//                                                                              //
//  Copyright(c) 2007-2012 Xavi Gratal                                          //
//  gratal AT gmail DOT com                                                     //
//                                                                              //
//  Buola is free software: you can redistribute it and/or modify               //
//  it under the terms of the GNU General Public License as published by        //
//  the Free Software Foundation, either version 3 of the License, or           //
//  (at your option) any later version.                                         //
//                                                                              //
//  Buola is distributed in the hope that it will be useful,                    //
//  but WITHOUT ANY WARRANTY; without even the implied warranty of              //
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               //
//  GNU General Public License for more details.                                //
//                                                                              //
//  You should have received a copy of the GNU General Public License           //
//  along with buola.  If not, see <http://www.gnu.org/licenses/>.              //
//                                                                              //
//////////////////////////////////////////////////////////////////////////////////

#include <buola/buola.h>
#include <cwctype>
#include "ctokenizer.h"

#include "cpythoninteger.h"
#include "cpythonfloat.h"

namespace buola { namespace bush {

/// Builds the static lookup tables used by the tokenizer:
///   - mKeywords: reserved words -> keyword token codes
///   - mOps: operator spellings -> operator token codes (longest-match trie)
///   - mTokenNames: token code -> human-readable name (used by PrintTokens/GetTokenName)
///
/// Fixes vs previous revision:
///   - removed duplicate registration of "!" in mOps
///   - TOKEN_OP_DOUBLESLASHEQUAL name normalized to "OP_DOUBLESLASHEQUAL"
///     (every other entry drops the TOKEN_ prefix)
///   - added a name for TOKEN_STRING_PREFIX, which TokenizePrefixedString emits
CTokenizer::CTokenizer()
{
    //keywords (Python reserved words plus True/False/None)
    mKeywords.Add(L"False",TOKEN_KW_FALSE);
    mKeywords.Add(L"class",TOKEN_KW_CLASS);
    mKeywords.Add(L"finally",TOKEN_KW_FINALLY);
    mKeywords.Add(L"is",TOKEN_KW_IS);
    mKeywords.Add(L"return",TOKEN_KW_RETURN);
    mKeywords.Add(L"None",TOKEN_KW_NONE);
    mKeywords.Add(L"continue",TOKEN_KW_CONTINUE);
    mKeywords.Add(L"for",TOKEN_KW_FOR);
    mKeywords.Add(L"lambda",TOKEN_KW_LAMBDA);
    mKeywords.Add(L"try",TOKEN_KW_TRY);
    mKeywords.Add(L"True",TOKEN_KW_TRUE);
    mKeywords.Add(L"def",TOKEN_KW_DEF);
    mKeywords.Add(L"from",TOKEN_KW_FROM);
    mKeywords.Add(L"nonlocal",TOKEN_KW_NONLOCAL);
    mKeywords.Add(L"while",TOKEN_KW_WHILE);
    mKeywords.Add(L"and",TOKEN_KW_AND);
    mKeywords.Add(L"del",TOKEN_KW_DEL);
    mKeywords.Add(L"global",TOKEN_KW_GLOBAL);
    mKeywords.Add(L"not",TOKEN_KW_NOT);
    mKeywords.Add(L"with",TOKEN_KW_WITH);
    mKeywords.Add(L"as",TOKEN_KW_AS);
    mKeywords.Add(L"elif",TOKEN_KW_ELIF);
    mKeywords.Add(L"if",TOKEN_KW_IF);
    mKeywords.Add(L"or",TOKEN_KW_OR);
    mKeywords.Add(L"yield",TOKEN_KW_YIELD);
    mKeywords.Add(L"assert",TOKEN_KW_ASSERT);
    mKeywords.Add(L"else",TOKEN_KW_ELSE);
    mKeywords.Add(L"import",TOKEN_KW_IMPORT);
    mKeywords.Add(L"pass",TOKEN_KW_PASS);
    mKeywords.Add(L"break",TOKEN_KW_BREAK);
    mKeywords.Add(L"except",TOKEN_KW_EXCEPT);
    mKeywords.Add(L"in",TOKEN_KW_IN);
    mKeywords.Add(L"raise",TOKEN_KW_RAISE);

    //operators; GetLongest does greedy longest-match, so single-char and
    //multi-char spellings can coexist (e.g. "<", "<<", "<<=")
    mOps.Add(L":",TOKEN_OP_COLON);
    mOps.Add(L",",TOKEN_OP_COMMA);
    mOps.Add(L";",TOKEN_OP_SEMICOLON);
    mOps.Add(L"+",TOKEN_OP_PLUS);
    mOps.Add(L"-",TOKEN_OP_MINUS);
    mOps.Add(L"*",TOKEN_OP_STAR);
    mOps.Add(L"/",TOKEN_OP_SLASH);
    mOps.Add(L"|",TOKEN_OP_OR);
    mOps.Add(L"&",TOKEN_OP_AND);
    mOps.Add(L"<",TOKEN_OP_LESS);
    mOps.Add(L">",TOKEN_OP_GREATER);
    mOps.Add(L"=",TOKEN_OP_EQUAL);
    mOps.Add(L".",TOKEN_OP_DOT);
    mOps.Add(L"%",TOKEN_OP_PERCENT);
    mOps.Add(L"^",TOKEN_OP_XOR);
    mOps.Add(L"~",TOKEN_OP_TILDE);
    mOps.Add(L"@",TOKEN_OP_AT);
    mOps.Add(L"==",TOKEN_OP_EQEQUAL);
    mOps.Add(L"!=",TOKEN_OP_NOTEQUALEXC);
    mOps.Add(L"<>",TOKEN_OP_NOTEQUALLG);
    mOps.Add(L"<=",TOKEN_OP_LESSEQUAL);
    mOps.Add(L"<<",TOKEN_OP_LEFTSHIFT);
    mOps.Add(L">=",TOKEN_OP_GREATEREQUAL);
    mOps.Add(L">>",TOKEN_OP_RIGHTSHIFT);
    mOps.Add(L"+=",TOKEN_OP_PLUSEQUAL);
    mOps.Add(L"-=",TOKEN_OP_MINUSEQUAL);
    mOps.Add(L"**",TOKEN_OP_POWER);
    mOps.Add(L"*=",TOKEN_OP_STAREQUAL);
    mOps.Add(L"//",TOKEN_OP_DOUBLESLASH);
    mOps.Add(L"/=",TOKEN_OP_SLASHEQUAL);
    mOps.Add(L"|=",TOKEN_OP_OREQUAL);
    mOps.Add(L"%=",TOKEN_OP_PERCENTEQUAL);
    mOps.Add(L"&=",TOKEN_OP_ANDEQUAL);
    mOps.Add(L"^=",TOKEN_OP_XOREQUAL);
    mOps.Add(L"->",TOKEN_OP_RARROW);
    mOps.Add(L"<<=",TOKEN_OP_LEFTSHIFTEQUAL);
    mOps.Add(L">>=",TOKEN_OP_RIGHTSHIFTEQUAL);
    mOps.Add(L"**=",TOKEN_OP_POWEREQUAL);
    mOps.Add(L"//=",TOKEN_OP_DOUBLESLASHEQUAL);
    mOps.Add(L"...",TOKEN_OP_ELLIPSIS);
    mOps.Add(L"!",TOKEN_OP_EXCLAMATION);
    mOps.Add(L"$",TOKEN_OP_DOLLAR);
    mOps.Add(L"?",TOKEN_OP_QUESTION);
    //shell-style operators (bush extensions)
    mOps.Add(L"&&",TOKEN_OP_ANDAND);
    mOps.Add(L"||",TOKEN_OP_OROR);
    mOps.Add(L"|&",TOKEN_OP_ORAND);
    mOps.Add(L">&",TOKEN_OP_GTAND);
    mOps.Add(L"<&",TOKEN_OP_LTAND);
    mOps.Add(L">>>",TOKEN_OP_GT3);
    mOps.Add(L"<<<",TOKEN_OP_LT3);
    mOps.Add(L"&>",TOKEN_OP_ANDGT);
    mOps.Add(L">?",TOKEN_OP_GTQUESTION);

    //debug names, one per token code, always the enum name minus the
    //TOKEN_ prefix
    mTokenNames[TOKEN_INVALID]="INVALID";
    mTokenNames[TOKEN_ESCAPEDCHAR]="ESCAPEDCHAR";
    mTokenNames[TOKEN_SPACE]="SPACE";
    mTokenNames[TOKEN_COMMENT]="COMMENT";
    mTokenNames[TOKEN_NEWLINE]="NEWLINE";
    mTokenNames[TOKEN_INDENT]="INDENT";
    mTokenNames[TOKEN_DEDENT]="DEDENT";
    mTokenNames[TOKEN_DEDENTERROR]="DEDENTERROR";
    mTokenNames[TOKEN_INTEGER]="INTEGER";
    mTokenNames[TOKEN_FLOAT]="FLOAT";
    mTokenNames[TOKEN_STRING_S]="STRING_S";
    mTokenNames[TOKEN_STRING_D]="STRING_D";
    mTokenNames[TOKEN_STRING_I]="STRING_I";
    mTokenNames[TOKEN_STRING_TS]="STRING_TS";
    mTokenNames[TOKEN_STRING_TD]="STRING_TD";
    mTokenNames[TOKEN_STRING_TI]="STRING_TI";
    mTokenNames[TOKEN_STRING_PREFIX]="STRING_PREFIX";
    mTokenNames[TOKEN_INCOMPLETESTRING_S]="INCOMPLETESTRING_S";
    mTokenNames[TOKEN_INCOMPLETESTRING_D]="INCOMPLETESTRING_D";
    mTokenNames[TOKEN_INCOMPLETESTRING_I]="INCOMPLETESTRING_I";
    mTokenNames[TOKEN_INCOMPLETESTRING_TS]="INCOMPLETESTRING_TS";
    mTokenNames[TOKEN_INCOMPLETESTRING_TD]="INCOMPLETESTRING_TD";
    mTokenNames[TOKEN_INCOMPLETESTRING_TI]="INCOMPLETESTRING_TI";
    mTokenNames[TOKEN_KW_FALSE]="KW_FALSE";
    mTokenNames[TOKEN_KW_CLASS]="KW_CLASS";
    mTokenNames[TOKEN_KW_FINALLY]="KW_FINALLY";
    mTokenNames[TOKEN_KW_IS]="KW_IS";
    mTokenNames[TOKEN_KW_RETURN]="KW_RETURN";
    mTokenNames[TOKEN_KW_NONE]="KW_NONE";
    mTokenNames[TOKEN_KW_CONTINUE]="KW_CONTINUE";
    mTokenNames[TOKEN_KW_FOR]="KW_FOR";
    mTokenNames[TOKEN_KW_LAMBDA]="KW_LAMBDA";
    mTokenNames[TOKEN_KW_TRY]="KW_TRY";
    mTokenNames[TOKEN_KW_TRUE]="KW_TRUE";
    mTokenNames[TOKEN_KW_DEF]="KW_DEF";
    mTokenNames[TOKEN_KW_FROM]="KW_FROM";
    mTokenNames[TOKEN_KW_NONLOCAL]="KW_NONLOCAL";
    mTokenNames[TOKEN_KW_WHILE]="KW_WHILE";
    mTokenNames[TOKEN_KW_AND]="KW_AND";
    mTokenNames[TOKEN_KW_DEL]="KW_DEL";
    mTokenNames[TOKEN_KW_GLOBAL]="KW_GLOBAL";
    mTokenNames[TOKEN_KW_NOT]="KW_NOT";
    mTokenNames[TOKEN_KW_WITH]="KW_WITH";
    mTokenNames[TOKEN_KW_AS]="KW_AS";
    mTokenNames[TOKEN_KW_ELIF]="KW_ELIF";
    mTokenNames[TOKEN_KW_IF]="KW_IF";
    mTokenNames[TOKEN_KW_OR]="KW_OR";
    mTokenNames[TOKEN_KW_YIELD]="KW_YIELD";
    mTokenNames[TOKEN_KW_ASSERT]="KW_ASSERT";
    mTokenNames[TOKEN_KW_ELSE]="KW_ELSE";
    mTokenNames[TOKEN_KW_IMPORT]="KW_IMPORT";
    mTokenNames[TOKEN_KW_PASS]="KW_PASS";
    mTokenNames[TOKEN_KW_BREAK]="KW_BREAK";
    mTokenNames[TOKEN_KW_EXCEPT]="KW_EXCEPT";
    mTokenNames[TOKEN_KW_IN]="KW_IN";
    mTokenNames[TOKEN_KW_RAISE]="KW_RAISE";
    mTokenNames[TOKEN_LEFT_PAREN]="LEFT_PAREN";
    mTokenNames[TOKEN_RIGHT_PAREN]="RIGHT_PAREN";
    mTokenNames[TOKEN_DOLLAR_PAREN]="DOLLAR_PAREN";
    mTokenNames[TOKEN_LEFT_SQUARE]="LEFT_SQUARE";
    mTokenNames[TOKEN_RIGHT_SQUARE]="RIGHT_SQUARE";
    mTokenNames[TOKEN_DOLLAR_SQUARE]="DOLLAR_SQUARE";
    mTokenNames[TOKEN_LEFT_BRACE]="LEFT_BRACE";
    mTokenNames[TOKEN_RIGHT_BRACE]="RIGHT_BRACE";
    mTokenNames[TOKEN_DOLLAR_BRACE]="DOLLAR_BRACE";
    mTokenNames[TOKEN_OP_COLON]="OP_COLON";
    mTokenNames[TOKEN_OP_COMMA]="OP_COMMA";
    mTokenNames[TOKEN_OP_SEMICOLON]="OP_SEMICOLON";
    mTokenNames[TOKEN_OP_PLUS]="OP_PLUS";
    mTokenNames[TOKEN_OP_MINUS]="OP_MINUS";
    mTokenNames[TOKEN_OP_STAR]="OP_STAR";
    mTokenNames[TOKEN_OP_SLASH]="OP_SLASH";
    mTokenNames[TOKEN_OP_OR]="OP_OR";
    mTokenNames[TOKEN_OP_AND]="OP_AND";
    mTokenNames[TOKEN_OP_LESS]="OP_LESS";
    mTokenNames[TOKEN_OP_GREATER]="OP_GREATER";
    mTokenNames[TOKEN_OP_EQUAL]="OP_EQUAL";
    mTokenNames[TOKEN_OP_DOT]="OP_DOT";
    mTokenNames[TOKEN_OP_PERCENT]="OP_PERCENT";
    mTokenNames[TOKEN_OP_XOR]="OP_XOR";
    mTokenNames[TOKEN_OP_TILDE]="OP_TILDE";
    mTokenNames[TOKEN_OP_AT]="OP_AT";
    mTokenNames[TOKEN_OP_EQEQUAL]="OP_EQEQUAL";
    mTokenNames[TOKEN_OP_NOTEQUALEXC]="OP_NOTEQUALEXC";
    mTokenNames[TOKEN_OP_NOTEQUALLG]="OP_NOTEQUALLG";
    mTokenNames[TOKEN_OP_LESSEQUAL]="OP_LESSEQUAL";
    mTokenNames[TOKEN_OP_LEFTSHIFT]="OP_LEFTSHIFT";
    mTokenNames[TOKEN_OP_GREATEREQUAL]="OP_GREATEREQUAL";
    mTokenNames[TOKEN_OP_RIGHTSHIFT]="OP_RIGHTSHIFT";
    mTokenNames[TOKEN_OP_PLUSEQUAL]="OP_PLUSEQUAL";
    mTokenNames[TOKEN_OP_MINUSEQUAL]="OP_MINUSEQUAL";
    mTokenNames[TOKEN_OP_POWER]="OP_POWER";
    mTokenNames[TOKEN_OP_STAREQUAL]="OP_STAREQUAL";
    mTokenNames[TOKEN_OP_DOUBLESLASH]="OP_DOUBLESLASH";
    mTokenNames[TOKEN_OP_SLASHEQUAL]="OP_SLASHEQUAL";
    mTokenNames[TOKEN_OP_OREQUAL]="OP_OREQUAL";
    mTokenNames[TOKEN_OP_PERCENTEQUAL]="OP_PERCENTEQUAL";
    mTokenNames[TOKEN_OP_ANDEQUAL]="OP_ANDEQUAL";
    mTokenNames[TOKEN_OP_XOREQUAL]="OP_XOREQUAL";
    mTokenNames[TOKEN_OP_RARROW]="OP_RARROW";
    mTokenNames[TOKEN_OP_LEFTSHIFTEQUAL]="OP_LEFTSHIFTEQUAL";
    mTokenNames[TOKEN_OP_RIGHTSHIFTEQUAL]="OP_RIGHTSHIFTEQUAL";
    mTokenNames[TOKEN_OP_POWEREQUAL]="OP_POWEREQUAL";
    mTokenNames[TOKEN_OP_DOUBLESLASHEQUAL]="OP_DOUBLESLASHEQUAL";
    mTokenNames[TOKEN_OP_ELLIPSIS]="OP_ELLIPSIS";
    mTokenNames[TOKEN_OP_EXCLAMATION]="OP_EXCLAMATION";
    mTokenNames[TOKEN_OP_DOLLAR]="OP_DOLLAR";
    mTokenNames[TOKEN_OP_QUESTION]="OP_QUESTION";
    mTokenNames[TOKEN_OP_ANDAND]="OP_ANDAND";
    mTokenNames[TOKEN_OP_OROR]="OP_OROR";
    mTokenNames[TOKEN_IDENTIFIER]="IDENTIFIER";
    mTokenNames[TOKEN_LINECONT]="LINECONT";
    mTokenNames[TOKEN_EOI]="EOI";
    mTokenNames[TOKEN_OP_ORAND]="OP_ORAND";
    mTokenNames[TOKEN_OP_GTAND]="OP_GTAND";
    mTokenNames[TOKEN_OP_LTAND]="OP_LTAND";
    mTokenNames[TOKEN_OP_GT3]="OP_GT3";
    mTokenNames[TOKEN_OP_LT3]="OP_LT3";
    mTokenNames[TOKEN_OP_ANDGT]="OP_ANDGT";
    mTokenNames[TOKEN_OP_GTQUESTION]="OP_GTQUESTION";
    mTokenNames[TOKEN_DOLLAR_IDENTIFIER]="DOLLAR_IDENTIFIER";
    mTokenNames[TOKEN_INVDIGIT]="INVDIGIT";
    mTokenNames[TOKEN_WEIRDCHAR]="WEIRDCHAR";
    mTokenNames[TOKEN_CURSOR]="CURSOR";
}

/// Appends a one-character token of type pToken at position pI and
/// advances pI past it.
void CTokenizer::AddSimpleToken(EToken pToken,tIt &pI,
                            std::vector<CToken> &pTokens)
{
    CToken lToken;
    lToken.mToken=pToken;
    lToken.mB=pI;
    ++pI;
    lToken.mE=pI;

    pTokens.push_back(lToken);
}

/// Consumes leading whitespace/comments at the start of a logical line and
/// emits INDENT/DEDENT tokens by comparing the indentation width against
/// mIndentStack (Python-style indentation tracking).
///
/// @param pB         advanced past the consumed leading whitespace
/// @param pE         end of input
/// @param pTokens    tokens are appended here
/// @param pFirstLine when true, indentation of the very first line never
///                   produces an INDENT token
void CTokenizer::TokenizeLineStart(tIt &pB,const tIt &pE,std::vector<CToken> &pTokens,bool pFirstLine)
{
    tIt lB=pB;
    
    while(pB!=pE)
    {
        switch(*pB)
        {
        case ' ':
            //part of the indentation; just measure it
            ++pB;
            break;
        case '#':
            //comment-only line: consume it without emitting a token here
            TokenizeComment(pB,pE,pTokens,false);
            break;
        case '\n':
            if(lB==pB&&lB+1==pE) //empty line before end of input... dedent everything
            {
                while(mIndentStack.back()!=0)
                {
                    //DEDENT tokens are zero-width markers at the current position
                    pTokens.push_back(CToken(TOKEN_DEDENT,pB,pB));
                    mIndentStack.pop_back();
                }
                pTokens.push_back(CToken(TOKEN_NEWLINE,lB,++pB));
            }
            else
            {
                //empty line
                //add it as a comment, and reset lB
                pTokens.push_back(CToken(TOKEN_COMMENT,lB,++pB));
            }
            lB=pB;
            break;
        default:
            //first non-whitespace character: indentation is pB-lB columns
            //add the space as a comment, since it is not useful as a token
            if(lB!=pB)
                pTokens.push_back(CToken(TOKEN_COMMENT,lB,pB));
            if(pB-lB>mIndentStack.back()) //indent
            {
                if(!pFirstLine)
                {
                    mIndentStack.push_back(pB-lB);
                    pTokens.push_back(CToken(TOKEN_INDENT,pB,pB));
                }
            }
            else if(pB-lB<mIndentStack.back()) //dedent (or error)
            {
                //pop levels until we reach one at or below the new width
                while(mIndentStack.back()>pB-lB)
                {
                    pTokens.push_back(CToken(TOKEN_DEDENT,pB,pB));
                    mIndentStack.pop_back();
                }
                //the new width must match an existing level exactly,
                //otherwise the indentation is inconsistent
                if(pB-lB!=mIndentStack.back())
                {
                    pTokens.push_back(CToken(TOKEN_DEDENTERROR,pB,pB));
                    mStatus|=STATUS_DEDENT_ERROR;
                }
            }
            return;
        }
    }
    
    //reached end of input while still in leading whitespace
    //this is good for completion, and probably has no side effects:
    if(lB!=pB)
        pTokens.push_back(CToken(TOKEN_COMMENT,lB,pB));

    if(pB-lB>mIndentStack.back()) //indent
    {
        mIndentStack.push_back(pB-lB);
        pTokens.push_back(CToken(TOKEN_INDENT,pB,pB));
    }
}

/// Consumes a run of consecutive spaces and appends one SPACE token
/// covering the whole run.
/// @pre *pB==L' '
/// @post pB==pE or *pB!=L' '
void CTokenizer::TokenizeSpace(tIt &pB,const tIt &pE,
                          std::vector<CToken> &pTokens)
{
    tIt lStart=pB;
    do
        ++pB;
    while(pB!=pE&&*pB==L' ');

    pTokens.push_back(CToken(TOKEN_SPACE,lStart,pB));
}

/// Tokenizes an operator using greedy longest-match against mOps.
/// GetLongest advances pB past the matched spelling.
void CTokenizer::TokenizeOperator(tIt &pB,const tIt &pE,
                          std::vector<CToken> &pTokens)
{
    const tIt lStart=pB;
    const EToken *lFound=mOps.GetLongest(pB,pE);
    //the dispatcher only routes here on characters that start an operator
    assert(lFound);
    pTokens.push_back(CToken(*lFound,lStart,pB));
}

/// Tokenizes a quoted string starting at pB. Handles three quote styles
/// (' single, " double, ` identifier/backtick) in both single and triple
/// form, with backslash escapes. If input ends before the closing quote,
/// an INCOMPLETESTRING_* token is emitted and mStatus records whether this
/// is a hard error (unterminated on the same line) or just end-of-input.
/// @pre *pB is one of ' " `
void CTokenizer::TokenizeString(tIt &pB,const tIt &pE,std::vector<CToken> &pTokens)
{
    tIt lB=pB;

    wchar_t lQuote=*lB;
    EToken lToken;
    
    if(pB+1!=pE&&pB+2!=pE&&*(pB+1)==lQuote&&*(pB+2)==lQuote) //triple quote
    {
        //pick the token type for a completed triple-quoted string
        switch(lQuote)
        {
        case '\'':
            lToken=TOKEN_STRING_TS;
            break;
        case '`':
            lToken=TOKEN_STRING_TI;
            break;
        case '\"':
        default:
            lToken=TOKEN_STRING_TD;
        }
            
        //scan past the opening triple quote looking for the closing one
        for(pB+=3;pB!=pE;++pB)
        {
            if(*pB==lQuote&&pB+1!=pE&&pB+2!=pE&&*(pB+1)==lQuote&&*(pB+2)==lQuote)
            {
                pB+=3;
                pTokens.push_back(CToken(lToken,lB,pB));
                return;
            }
            else if(*pB=='\\')
            {
                //skip the escaped character (it might be a quote)
                ++pB;
                if(pB==pE)
                    break;
            }
        }
        //ran out of input inside the string: not an error for triple quotes,
        //the string may simply continue in later input
        mStatus|=STATUS_STRING_EOI;

        switch(lQuote)
        {
        case '\'':
            lToken=TOKEN_INCOMPLETESTRING_TS;
            break;
        case '`':
            lToken=TOKEN_INCOMPLETESTRING_TI;
            break;
        case '\"':
        default:
            lToken=TOKEN_INCOMPLETESTRING_TD;
        }
        
        pTokens.push_back(CToken(lToken,lB,pB));
    }
    else
    {
        //single-quoted (one-line) string
        switch(lQuote)
        {
        case '\'':
            lToken=TOKEN_STRING_S;
            break;
        case '`':
            lToken=TOKEN_STRING_I;
            break;
        case '\"':
        default:
            lToken=TOKEN_STRING_D;
        }
        
        //assume the worst (unterminated on this line) until proven otherwise
        bool lError=true;
        
        for(++pB;pB!=pE;++pB)
        {
            if(*pB==lQuote)
            {
                ++pB;
                pTokens.push_back(CToken(lToken,lB,pB));
                return;
            }
            else if(*pB=='\\')
            {
                ++pB;
                //a backslash at end of input (or escaping the final newline)
                //means the string legitimately continues in later input
                if(pB==pE||(*pB==L'\n'&&pB+1==pE))
                {
                    pB=pE;
                    lError=false;
                    break;
                }
            }
            else if(*pB=='\n')
                break; //unescaped newline terminates (and invalidates) the string
        }

        if(lError)
            mStatus|=STATUS_STRING_ERROR;
        else
            mStatus|=STATUS_STRING_EOI;

        switch(lQuote)
        {
        case '\'':
            lToken=TOKEN_INCOMPLETESTRING_S;
            break;
        case '`':
            lToken=TOKEN_INCOMPLETESTRING_I;
            break;
        case '\"':
        default:
            lToken=TOKEN_INCOMPLETESTRING_D;
        }
        
        pTokens.push_back(CToken(lToken,lB,pB));
    }
}

/// Handles identifiers that may be string prefixes: 'g'/'e' before a quote,
/// 's'/'l' before a backtick, and the b/B and/or r/R combinations before a
/// quote. If the prefix is followed by the expected quote character, emits
/// a STRING_PREFIX token and delegates to TokenizeString; otherwise falls
/// back to TokenizeWord (it was just an ordinary identifier/keyword).
/// @pre *pB is one of r R b B g s e l
void CTokenizer::TokenizePrefixedString(tIt &pB,const tIt &pE,std::vector<CToken> &pTokens)
{
    tIt lI=pB;
    
    if(*lI=='g'||*lI=='e')
    {
        //g/e prefixes apply to ' or " strings only
        if(++lI==pE)
        {
            TokenizeWord(pB,pE,pTokens);
            return;
        }

        if(*lI!='\''&&*lI!='\"')
        {
            TokenizeWord(pB,pE,pTokens);
            return;
        }
    }
    else if(*lI=='s'||*lI=='l')
    {
        //s/l prefixes apply to ` (backtick) strings only
        if(++lI==pE)
        {
            TokenizeWord(pB,pE,pTokens);
            return;
        }

        if(*lI!='`')
        {
            TokenizeWord(pB,pE,pTokens);
            return;
        }
    }
    else
    {
        //b/B optionally followed by r/R, before ' or " (bytes/raw strings)
        if(*lI=='b'||*lI=='B')
        {
            if(++lI==pE)
            {
                TokenizeWord(pB,pE,pTokens);
                return;
            }
        }
        
        if(*lI=='r'||*lI=='R')
        {
            if(++lI==pE)
            {
                TokenizeWord(pB,pE,pTokens);
                return;
            }
        }
        
        if(*lI!='\''&&*lI!='\"')
        {
            TokenizeWord(pB,pE,pTokens);
            return;
        }
    }
    
    //confirmed prefix: emit it as its own token, then the string proper
    pTokens.push_back(CToken(TOKEN_STRING_PREFIX,pB,lI));
    pB=lI;
    TokenizeString(pB,pE,pTokens);
}

/// Consumes characters from pB up to (but not including) the next newline
/// or end of input. A COMMENT token is appended only when pAddToken is set;
/// TokenizeLineStart uses pAddToken=false to swallow comment-only lines.
void CTokenizer::TokenizeComment(tIt &pB,const tIt &pE,std::vector<CToken> &pTokens,bool pAddToken)
{
    tIt lStart=pB;
    ++pB;
    while(pB!=pE&&*pB!=L'\n')
        ++pB;

    if(pAddToken)
        pTokens.push_back(CToken(TOKEN_COMMENT,lStart,pB));
}

/// Tokenizes a bracket character and keeps mEncloserStack in sync: openers
/// are pushed, and a closer pops only when it matches the innermost opener.
/// A mismatched closer still gets its token but leaves the stack untouched.
/// @pre *pB is one of ( ) [ ] { }
void CTokenizer::TokenizeEncloser(tIt &pB,const tIt& /*pE*/,std::vector<CToken> &pTokens)
{
    const wchar_t lC=*pB;

    if(lC=='(')
    {
        AddSimpleToken(TOKEN_LEFT_PAREN,pB,pTokens);
        mEncloserStack.push_back('(');
    }
    else if(lC=='[')
    {
        AddSimpleToken(TOKEN_LEFT_SQUARE,pB,pTokens);
        mEncloserStack.push_back('[');
    }
    else if(lC=='{')
    {
        AddSimpleToken(TOKEN_LEFT_BRACE,pB,pTokens);
        mEncloserStack.push_back('{');
    }
    else if(lC==')')
    {
        AddSimpleToken(TOKEN_RIGHT_PAREN,pB,pTokens);
        if(!mEncloserStack.empty()&&mEncloserStack.back()=='(')
            mEncloserStack.pop_back();
    }
    else if(lC==']')
    {
        AddSimpleToken(TOKEN_RIGHT_SQUARE,pB,pTokens);
        if(!mEncloserStack.empty()&&mEncloserStack.back()=='[')
            mEncloserStack.pop_back();
    }
    else if(lC=='}')
    {
        AddSimpleToken(TOKEN_RIGHT_BRACE,pB,pTokens);
        if(!mEncloserStack.empty()&&mEncloserStack.back()=='{')
            mEncloserStack.pop_back();
    }
}

/// A leading '.' may begin a float literal (e.g. ".5"); try to parse one
/// first, and otherwise fall back to operator tokenization ('.', '...').
void CTokenizer::TokenizeDot(tIt &pB,const tIt &pE,
                          std::vector<CToken> &pTokens)
{
    tIt lEnd=pB;
    if(!token::CPythonFloat::Parse(lEnd,pE))
    {
        //not a numeric literal: handle as dot/ellipsis operator
        TokenizeOperator(pB,pE,pTokens);
        return;
    }

    pTokens.push_back(CToken(TOKEN_FLOAT,pB,lEnd));
    pB=lEnd;
}

/// Tokenizes a numeric literal: float first (its grammar subsumes integer
/// prefixes like "1."), then integer, and finally an INVDIGIT token for a
/// digit run that parses as neither.
/// @pre iswdigit(*pB)
void CTokenizer::TokenizeDigit(tIt &pB,const tIt &pE,
                          std::vector<CToken> &pTokens)
{
    tIt lEnd=pB;
    if(token::CPythonFloat::Parse(lEnd,pE))
    {
        pTokens.push_back(CToken(TOKEN_FLOAT,pB,lEnd));
        pB=lEnd;
        return;
    }

    lEnd=pB;
    if(token::CPythonInteger::Parse(lEnd,pE))
    {
        pTokens.push_back(CToken(TOKEN_INTEGER,pB,lEnd));
        pB=lEnd;
        return;
    }

    //it is an invalid python digit... tokenize all numbers
    tIt lStart=pB;
    do
        ++pB;
    while(pB!=pE&&iswdigit(*pB));

    pTokens.push_back(CToken(TOKEN_INVDIGIT,lStart,pB));
}

/// Tokenizes a backslash, which has several meanings here:
///   - '\' at end of input: LINECONT (line continuation pending more input)
///   - '\' + newline: treated as a COMMENT (joins physical lines)
///   - '\i...\', '\n...\', '\d...\': an embedded directive that injects a
///     NEWLINE plus INDENT ('i') or one DEDENT per 'd' ('d'); presumably a
///     back-channel used by the editor/completion machinery -- TODO confirm
///   - anything else: ESCAPEDCHAR covering the backslash and next character
void CTokenizer::TokenizeBackslash(tIt &pB,const tIt &pE,
                          std::vector<CToken> &pTokens)
{
    tIt lI=pB;
    if(++lI==pE)
    {
        //lone backslash at end of input: continuation marker
        AddSimpleToken(TOKEN_LINECONT,pB,pTokens);
        return;
    }
    
    if(*lI=='\n')
    {
        //escaped newline: swallow as a comment so the logical line continues
        pTokens.push_back(CToken(TOKEN_COMMENT,pB,++lI));
        pB=lI;
        if(pB==pE)
            mStatus|=STATUS_LINECONT_EOI;
        return;
    }
    else if(*lI=='i'||*lI=='n'||*lI=='d')
    {
        //look for a closing backslash to confirm the directive form
        tIt lO=lI;
        while(++lO!=pE) //otherwise it is just a escaped character, fallback
        {
            if(*lO=='\\')
            {
                //directive confirmed: emit a NEWLINE covering '\X...\'
                pTokens.push_back(CToken(TOKEN_NEWLINE,pB,lO+1));
                pB=lO+1;
                if(*lI=='i')
                {
                    pTokens.push_back(CToken(TOKEN_INDENT,pB,pB));
                }
                else if(*lI=='d')
                {
                    //one DEDENT per 'd' character in the directive
                    for(int i=0;i<lO-lI;i++)
                        pTokens.push_back(CToken(TOKEN_DEDENT,pB,pB));
                }

                //consume any spaces following the directive as a comment
                lI=pB;
                while(pB!=pE&&*pB==L' ') ++pB;
                if(lI!=pB) pTokens.push_back(CToken(TOKEN_COMMENT,lI,pB));
                return;
            }
            else if(*lO=='d'&&*lI=='d')
                continue; //'\dd...d\' allows repeated d's
            break; //and fallback
        }
    }

    //default: backslash escapes exactly one character
    pTokens.push_back(CToken(TOKEN_ESCAPEDCHAR,pB,++lI));
    pB=lI;
}

/// Tokenizes a '$' and whatever it introduces:
///   - '$(' / '$[' / '${' : DOLLAR_PAREN/SQUARE/BRACE; the opener is also
///     pushed on the encloser stack so the matching closer balances it
///   - '$' followed by an identifier: DOLLAR_IDENTIFIER ($id subexpression)
///   - otherwise (or at end of input): plain OP_DOLLAR
/// @pre *pB=='$'
void CTokenizer::TokenizeDollar(tIt &pB,const tIt &pE,
                          std::vector<CToken> &pTokens)
{
    tIt lI=pB;
    ++pB;
    
    if(pB==pE)
    {
        //'$' is the last character: just the operator
        pTokens.push_back(CToken(TOKEN_OP_DOLLAR,lI,pB));
        return;
    }
    
    switch(*pB)
    {
    case '(':
        pTokens.push_back(CToken(TOKEN_DOLLAR_PAREN,lI,++pB));
        mEncloserStack.push_back('(');
        return;
    case '[':
        pTokens.push_back(CToken(TOKEN_DOLLAR_SQUARE,lI,++pB));
        mEncloserStack.push_back('[');
        return;
    case '{':
        pTokens.push_back(CToken(TOKEN_DOLLAR_BRACE,lI,++pB));
        mEncloserStack.push_back('{');
        return;
    default:
        break;
    }

    if(iswalpha(*pB)||*pB=='_') //short python subexpression $id
    {
        //consume the identifier tail (alphanumerics and underscores)
        for(++pB;pB!=pE;++pB)
        {
            if(!(iswalnum(*pB)||*pB=='_'))
                break;
        }

        pTokens.push_back(CToken(TOKEN_DOLLAR_IDENTIFIER,lI,pB));
    }
    else
    {
        pTokens.push_back(CToken(TOKEN_OP_DOLLAR,lI,pB));
    }
}

/// Tokenizes an identifier or keyword starting at pB. A character that
/// cannot start an identifier is emitted as a single WEIRDCHAR token.
void CTokenizer::TokenizeWord(tIt &pB,const tIt &pE,
                          std::vector<CToken> &pTokens)
{
    tIt lStart=pB;

    //not a valid identifier start: consume one char as WEIRDCHAR
    if(!(iswalpha(*pB)||*pB=='_'))
    {
        ++pB;
        pTokens.push_back(CToken(TOKEN_WEIRDCHAR,lStart,pB));
        return;
    }

    //consume the identifier tail: alphanumerics and underscores
    ++pB;
    while(pB!=pE&&(iswalnum(*pB)||*pB=='_'))
        ++pB;

    //keywords take precedence over plain identifiers
    tIt lLookup=lStart;
    const EToken *lKeyword=mKeywords.Get(lLookup,pB);
    if(lKeyword)
        pTokens.push_back(CToken(*lKeyword,lStart,pB));
    else
        pTokens.push_back(CToken(TOKEN_IDENTIFIER,lStart,pB));
}

/// Tokenizes the input range [pB,pE) into pTokens, resetting all tokenizer
/// state first. The token stream is contiguous (each token's end is the
/// next one's beginning) and always terminated with an EOI token.
///
/// @param pB      start of input (not advanced; a local copy is used)
/// @param pE      end of input
/// @param pTokens output token stream (cleared first)
/// @return TOKENIZE_OK, TOKENIZE_EMPTY (only whitespace-class tokens), or
///         the first applicable error/end-of-input condition accumulated
///         in mStatus, in the priority order checked below
ETokenizeResult CTokenizer::Tokenize(tIt &pB,const tIt &pE,std::vector<CToken> &pTokens)
{
    //reset all per-run state
    pTokens.clear();
    mEncloserStack.clear();
    mIndentStack.clear();
    mIndentStack.push_back(0);
    mStatus=STATUS_OK;
    
    tIt lI=pB;

    //handle indentation of the very first line (never emits INDENT)
    TokenizeLineStart(lI,pE,pTokens,true);
    
    while(lI!=pE)
    {
        switch(*lI)
        {
        case '\n':
            if(mEncloserStack.empty())
            {
                AddSimpleToken(TOKEN_NEWLINE,lI,pTokens);
                TokenizeLineStart(lI,pE,pTokens,false);
            }
            else
                //newlines inside brackets are implicit continuations
                AddSimpleToken(TOKEN_COMMENT,lI,pTokens);
            break;
        case ' ':
            TokenizeSpace(lI,pE,pTokens);
            break;
        case '$':
            TokenizeDollar(lI,pE,pTokens);
            break;
        case '!':
        case '%':
        case '&':
        case '*':
        case '+':
        case ',':
        case '-':
        case '/':
        case ':':
        case ';':
        case '<':
        case '=':
        case '>':
        case '?':
        case '@':
        case '^':
        case '|':
        case '~':
            TokenizeOperator(lI,pE,pTokens);
            break;
        case '\"':
        case '\'':
        case '`':
            TokenizeString(lI,pE,pTokens);
            break;
        //letters that can prefix a string (r"", b"", g"", s``, ...);
        //TokenizePrefixedString falls back to TokenizeWord when they don't
        case 'r':
        case 'R':
        case 'b':
        case 'B':
        case 'g':
        case 's':
        case 'e':
        case 'l':
            TokenizePrefixedString(lI,pE,pTokens);
            break;
        case '#':
            TokenizeComment(lI,pE,pTokens);
            break;
        case '(':
        case ')':
        case '[':
        case ']':
        case '{':
        case '}':
            TokenizeEncloser(lI,pE,pTokens);
            break;
        case '.':
            TokenizeDot(lI,pE,pTokens);
            break;
        case '0':
        case '1':
        case '2':
        case '3':
        case '4':
        case '5':
        case '6':
        case '7':
        case '8':
        case '9':
            TokenizeDigit(lI,pE,pTokens);
            break;
        case '\\':
            TokenizeBackslash(lI,pE,pTokens);
            break;
        default:
            TokenizeWord(lI,pE,pTokens);
        }
    }

    pTokens.push_back(CToken(TOKEN_EOI,lI,lI));

    //check token consistency
    //TODO:remove this once we are sure there are no errors
    assert(pTokens.front().mB==pB);
    for(auto i=pTokens.begin();i!=pTokens.end()-1;++i)
    {
        assert(i->mE==(i+1)->mB);
    }
    
    //report conditions in fixed priority order
    if(mStatus&STATUS_DEDENT_ERROR)
        return TOKENIZE_DEDENT_ERROR;
    if(mStatus&STATUS_STRING_ERROR)
        return TOKENIZE_STRING_ERROR;
    if(mStatus&STATUS_STRING_EOI)
        return TOKENIZE_EOI_STRING;
    if(mStatus&STATUS_LINECONT_EOI)
        return TOKENIZE_EOI_LINECONT;
    if(!mEncloserStack.empty())
        return TOKENIZE_EOI_ENCLOSURE;

//    PrintTokens(pTokens.begin(),pTokens.end());
    
    //EMPTY when every token before the terminating EOI is whitespace-class
    //(size_t index avoids the previous signed/unsigned comparison)
    for(size_t i=0;i+1<pTokens.size();++i)
        if(!(pTokens[i].mToken&TOKENTYPE_WHITESPACE))
            return TOKENIZE_OK;

    return TOKENIZE_EMPTY;
}

/// Logs the name of every token in [pB,pE), one per line.
void CTokenizer::PrintTokens(TTokenIt pB, TTokenIt pE)
{
    while(pB!=pE)
    {
        msg_info() << GetTokenName(pB->mToken) << "\n";
        ++pB;
    }
}

/*namespace bush*/ } /*namespace buola*/ }
