//////////////////////////////////////////////////////////////////////////////////
//                                                                              //
//  This file is part of the buola project (https://code.google.com/p/buola/).  //
//                                                                              //
//  Copyright(c) 2007-2012 Xavi Gratal                                          //
//  gratal AT gmail DOT com                                                     //
//                                                                              //
//  Buola is free software: you can redistribute it and/or modify               //
//  it under the terms of the GNU General Public License as published by        //
//  the Free Software Foundation, either version 3 of the License, or           //
//  (at your option) any later version.                                         //
//                                                                              //
//  Buola is distributed in the hope that it will be useful,                    //
//  but WITHOUT ANY WARRANTY; without even the implied warranty of              //
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               //
//  GNU General Public License for more details.                                //
//                                                                              //
//  You should have received a copy of the GNU General Public License           //
//  along with buola.  If not, see <http://www.gnu.org/licenses/>.              //
//                                                                              //
//////////////////////////////////////////////////////////////////////////////////

#include "cparser.h"

#include "ctokenizer.h"
#include "ztoken.h"
#include "ztokentype.h"
#include "../cinterpreter.h"

#include <buola/zz/operators.h>
#include <buola/zz/zsymbols.h>
#include <buola/zz/zeps.h>
#include <buola/zz/zlexeme.h>
#include <buola/zz/zskipoff.h>
#include <buola/zz/zskipon.h>
#include <buola/zz/zskipping.h>
#include <buola/zz/zchar.h>
#include <buola/zz/zinteger.h>
#include <buola/zz/zattr.h>
#include <buola/zz/zraw.h>

#include <buola/app/ccmdline.h>
#include <buola/os/path.h>

#include <set>

//TODO_MAYBE: add something like smart aliases, where a python function is called with the words themselves
//to do something
//this would allow implementing something like 'extract file', which would automatically call the right one
//depending on file extension

//TODO: modify the rule for shell statements, so that it is only triggered if it ends in the right thing!

namespace buola { namespace bush {
    
//command-line flag: when passed, the parser prints debugging information
CCmdLineFlag sDebugParser("debug-parser",L"print debugging information for the parser");

//semantic action functor: heap-allocates a tType by moving the parsed
//attribute into its single-argument constructor, and hands ownership to the
//context's attribute pointer
template<typename tType>
struct AMoveBuilder1
{
    template<typename tAttr,typename tCtx>
    void operator()(tAttr &pAttr,tCtx &pCtx) const
    {
        //move rather than copy: the parsed attribute is no longer needed
        tType *lBuilt=new tType(std::move(pAttr));
        pCtx.mAttribute.reset(lBuilt);
    }
};

//semantic action functor: builds a tType from a two-part attribute where the
//first part is a two-element sequence and the second part is a single value,
//i.e. tType(part0[0],part0[1],part1); stores the result in the context
template<typename tType>
struct ABuilder3
{
    template<typename tAttr,typename tCtx>
    void operator()(const tAttr &pAttr,tCtx &pCtx) const
    {
        const auto &lPair=std::get<0>(pAttr);
        pCtx.mAttribute.reset(new tType(lPair[0],lPair[1],std::get<1>(pAttr)));
    }
};

//semantic action functor: ignores the parsed attribute and stores a freshly
//value-initialized tType in the context's attribute pointer
template<typename tType>
struct ABuilder0
{
    template<typename tCtx>
    void operator()(UUnused,tCtx &pCtx) const
    {
        //value-initialization (tType()) on purpose, not default-initialization
        tType *lFresh=new tType();
        pCtx.mAttribute.reset(lFresh);
    }
};

//semantic action functor: forwards the parsed attribute to the Add() member
//of the object already held by the context's attribute pointer
struct AAdder
{
    template<typename tAttr,typename tCtx>
    void operator()(const tAttr &pAttr,tCtx &pCtx) const
    {
        auto &lTarget=*pCtx.mAttribute;
        lTarget.Add(pAttr);
    }
};

//semantic action functor: records the token id of a parsed separator through
//the AddSeparator() member of the context's attribute object
struct ASeparatorAdder
{
    template<typename tAttr,typename tCtx>
    void operator()(const tAttr &pAttr,tCtx &pCtx) const
    {
        auto &lTarget=*pCtx.mAttribute;
        lTarget.AddSeparator(pAttr.mToken);
    }
};

//semantic action functor: copies the parsed attribute straight into the
//context's attribute by assignment
struct ASetter
{
    template<typename tAttr,typename tCtx>
    void operator()(const tAttr &pAttr,tCtx &pCtx) const
    {
        //plain copy-assignment; the context attribute must be assignable from tAttr
        pCtx.mAttribute=pAttr;
    }
};

//semantic action functor: appends a normal (non-brace) part to the shell word
//being accumulated in the context's attribute
struct AAddWordPart
{
    template<typename tAttr,typename tCtx>
    void operator()(const tAttr &pAttr,tCtx &pCtx) const
    {
        auto &lWord=pCtx.mAttribute;
        lWord.AddNormal(pAttr);
    }
};

//semantic action functor: appends a brace-expansion part to the shell word
//being accumulated in the context's attribute
struct AAddBracePart
{
    template<typename tAttr,typename tCtx>
    void operator()(const tAttr &pAttr,tCtx &pCtx) const
    {
        auto &lWord=pCtx.mAttribute;
        lWord.AddBraces(pAttr);
    }
};

//constructor: builds the complete grammar (python-like and shell rules) once
CParser::CParser()
{
    CreateGrammar();
}

//builds every grammar rule used by the parser: the python-like grammar
//(adapted from the official python Grammar file, see notes below) and the
//shell-pipeline grammar; called once from the constructor
void CParser::CreateGrammar()
{
//python grammar from Grammar/Grammar in the python distribution
    //changes:
    //  - removed operator precedence
    //  - changed for function keyword arguments
    //  - moved the preceding colon into the suite        
    //  - moved the decorators to rCompoundStmt
    //  - created else and finally statements
    //  - removed rYieldStmt
    //  - created rOptionalEqualTest
    //  - added tiny statement, replacing break,continue,pass
    //  - switched evaluation order for rArgument
    //  - sorted from general to particular
    
    ///////////////////////////////////////////////////////////////////////////////////////////////////////////////
    //  python building blocks
    //
    //    these are parts of expressions or statements which are not interpreted by bush, only used to detect
    //    more complex expressions
    //

    // function parameters
    rParameters=ztoken(TOKEN_LEFT_PAREN) >> -rTypedArgsList >> ztoken(TOKEN_RIGHT_PAREN);
    rOptionalEqualTest=-(ztoken(TOKEN_OP_EQUAL) >> rTest);
    //typed (def) argument list: positional args with optional defaults, then *args and **kwargs forms
    rTypedArgsList=alternative(sequence((rTFPDef >> rOptionalEqualTest)%ztoken(TOKEN_OP_COMMA),
                                      -(ztoken(TOKEN_OP_COMMA) >> -(sequence(ztoken(TOKEN_OP_STAR),-rTFPDef,
                                            *sequence(ztoken(TOKEN_OP_COMMA),rTFPDef,rOptionalEqualTest),
                                            -sequence(ztoken(TOKEN_OP_COMMA),ztoken(TOKEN_OP_POWER),rTFPDef)) |
                                            ztoken(TOKEN_OP_POWER) >> rTFPDef))),
                             sequence(ztoken(TOKEN_OP_STAR),-rTFPDef,
                                        *sequence(ztoken(TOKEN_OP_COMMA),rTFPDef,rOptionalEqualTest),
                                        -sequence(ztoken(TOKEN_OP_COMMA),ztoken(TOKEN_OP_POWER),rTFPDef)),
                             ztoken(TOKEN_OP_POWER) >> rTFPDef);
    rTFPDef=ztoken(TOKEN_IDENTIFIER) >> -(ztoken(TOKEN_OP_COLON) >> rTest);
    //untyped (lambda) argument list: same shape as rTypedArgsList but without annotations
    rVarArgsList=alternative(sequence((rVFPDef >> rOptionalEqualTest)%ztoken(TOKEN_OP_COMMA),
                                      -(ztoken(TOKEN_OP_COMMA) >> -(sequence(ztoken(TOKEN_OP_STAR),-rVFPDef,
                                            *sequence(ztoken(TOKEN_OP_COMMA),rVFPDef,rOptionalEqualTest),
                                            -sequence(ztoken(TOKEN_OP_COMMA),ztoken(TOKEN_OP_POWER),rVFPDef)) |
                                            ztoken(TOKEN_OP_POWER) >> rVFPDef))),
                             sequence(ztoken(TOKEN_OP_STAR),-rVFPDef,
                                        *sequence(ztoken(TOKEN_OP_COMMA),rVFPDef,rOptionalEqualTest),
                                        -sequence(ztoken(TOKEN_OP_COMMA),ztoken(TOKEN_OP_POWER),rVFPDef)),
                             ztoken(TOKEN_OP_POWER) >> rVFPDef);
    rVFPDef=ztoken(TOKEN_IDENTIFIER);
    
    //argument list
    rArgList=*(rArgument >> ztoken(TOKEN_OP_COMMA)) >> (rArgument >> -ztoken(TOKEN_OP_COMMA) |
                        ztoken(TOKEN_OP_STAR) >> rTest >> *(ztoken(TOKEN_OP_COMMA) >> rArgument) >>
                        -(ztoken(TOKEN_OP_COMMA) >> ztoken(TOKEN_OP_POWER) >> rTest)
                        | ztoken(TOKEN_OP_POWER) >> rTest);
            //I think it is safe to use IDENTIFIER here instead of test, but it's different in the
            //official grammar
    rArgument=sequence(zcomptoken(TOKEN_IDENTIFIER,COMP_KEYWORDARG),ztoken(TOKEN_OP_EQUAL),rTest) | rTest >> -rCompFor;

    //comprehensions
    rCompIter=rCompFor|rCompIf;
    rCompFor=sequence(ztoken(TOKEN_KW_FOR),rExprList,ztoken(TOKEN_KW_IN),rBoolTest,-rCompIter);
    rCompIf=sequence(ztoken(TOKEN_KW_IF),rTestNoCond,-rCompIter);

    //trailers: call, subscript or attribute access following an atom
    rTrailer=alternative(sequence(ztoken(TOKEN_LEFT_PAREN),-rArgList,ztoken(TOKEN_RIGHT_PAREN)),
                         sequence(ztoken(TOKEN_LEFT_SQUARE),rSubscriptList,ztoken(TOKEN_RIGHT_SQUARE)),
                         ztoken(TOKEN_OP_DOT) >> zcomptoken(TOKEN_IDENTIFIER,COMP_AFTERDOT));
    rSubscriptList=rSubscript%ztoken(TOKEN_OP_COMMA) >> -ztoken(TOKEN_OP_COMMA);
    rSubscript=-rTest >> ztoken(TOKEN_OP_COLON) >> -rTest >> -rSliceOp | rTest;
    rSliceOp=ztoken(TOKEN_OP_COLON) >> -rTest;

    //subexpressions
    rLambDef=ztoken(TOKEN_KW_LAMBDA) >> -rVarArgsList >> ztoken(TOKEN_OP_COLON) >> rTest;
    rLambDefNoCond=ztoken(TOKEN_KW_LAMBDA) >> -rVarArgsList >> ztoken(TOKEN_OP_COLON) >> rTestNoCond;

    //since we are not really compiling and we don't care about precedence, we can simplify 
    //these rules like this:
    rBoolTest=rNotTest%ztokentype(TOKENTYPE_BOOLBINOP);
    rNotTest=ztoken(TOKEN_KW_NOT) >> rNotTest | rComparison;

    rComparison=rExpr%rCompOp;
    rCompOp=ztokentype(TOKENTYPE_COMPAREOP)|
                    (ztoken(TOKEN_KW_IS)>>-ztoken(TOKEN_KW_NOT))|
                    (-ztoken(TOKEN_KW_NOT)>>ztoken(TOKEN_KW_IN));
    rStarExpr=ztoken(TOKEN_OP_STAR) >> rExpr;
    //this is a recombination of all binary operators
    rExpr=rUExpr % ztokentype(TOKENTYPE_BINARYOP); //binary expression
    //this is a recombination of factor and power
    rUExpr=*ztokentype(TOKENTYPE_UNARYOP) >> (rAtom>>*rTrailer); //unary expression
    rAtom=alternative(sequence(ztoken(TOKEN_LEFT_PAREN),-(rYieldExpr|rTestListComp),ztoken(TOKEN_RIGHT_PAREN)),
                      sequence(ztoken(TOKEN_LEFT_SQUARE),-rTestListComp,ztoken(TOKEN_RIGHT_SQUARE)),
                      sequence(ztoken(TOKEN_LEFT_BRACE),-rDictOrSetMaker,ztoken(TOKEN_RIGHT_BRACE)),
                      zcomptoken(TOKEN_IDENTIFIER,COMP_ATOM),
                      ztokentype(TOKENTYPE_NUMBER),
                      +(-ztoken(TOKEN_STRING_PREFIX)>>ztokentype(TOKENTYPE_STRING)),
                      ztoken(TOKEN_OP_ELLIPSIS),
                      ztokentype(TOKENTYPE_LITERALKW));
    rExprList=(rExpr|rStarExpr)%ztoken(TOKEN_OP_COMMA) >> -ztoken(TOKEN_OP_COMMA);
    rDictOrSetMaker=((rTest >> ztoken(TOKEN_OP_COLON) >> rTest >> (rCompFor | *(ztoken(TOKEN_OP_COMMA) >> rTest >> ztoken(TOKEN_OP_COLON) >> rTest) >> -ztoken(TOKEN_OP_COMMA))) |
                (rTest >> (rCompFor | *(ztoken(TOKEN_OP_COMMA) >> rTest) >> -ztoken(TOKEN_OP_COMMA))));
    
    //import parts
    rImportName=ztoken(TOKEN_KW_IMPORT) >> rDottedAsNames;
    rImportFrom=(ztoken(TOKEN_KW_FROM) >> (*ztokentype(TOKENTYPE_DOTS) >> rDottedName | 
                +ztokentype(TOKENTYPE_DOTS)) >> ztoken(TOKEN_KW_IMPORT) >>
                (ztoken(TOKEN_OP_STAR) | ztoken(TOKEN_LEFT_PAREN) >> rImportAsNames >>
                ztoken(TOKEN_RIGHT_PAREN) | rImportAsNames));
    rImportAsName=zcomptoken(TOKEN_IDENTIFIER,COMP_IMPORT_FROM) >> -(ztoken(TOKEN_KW_AS) >> ztoken(TOKEN_IDENTIFIER));
    rDottedAsName=rDottedName >> -(ztoken(TOKEN_KW_AS) >> ztoken(TOKEN_IDENTIFIER));
    rImportAsNames=rImportAsName%ztoken(TOKEN_OP_COMMA)>>-ztoken(TOKEN_OP_COMMA);
    rDottedAsNames=rDottedAsName%ztoken(TOKEN_OP_COMMA);
    rDottedName=zcomptoken(TOKEN_IDENTIFIER,COMP_DOTTEDNAME)%ztoken(TOKEN_OP_DOT);

    //used in class and function definitions
    rDecorator=sequence(ztoken(TOKEN_OP_AT),
                        rDottedName,
                        -sequence(ztoken(TOKEN_LEFT_PAREN),-rArgList,ztoken(TOKEN_RIGHT_PAREN)),
                        ztoken(TOKEN_NEWLINE));

    //used in with and except definitions
    rWithItem=rTest >> -(ztoken(TOKEN_KW_AS) >> rExpr);
    rExceptClause=ztoken(TOKEN_KW_EXCEPT) >> -(rTest >> -(ztoken(TOKEN_KW_AS) >> ztoken(TOKEN_IDENTIFIER)));
    
    ///////////////////////////////////////////////////////////////////////////////////////////////////////////////
    //  python expressions
    //
    //    these are not statements by themselves, but can be used in other constructions, and it is sometimes
    //    necessary to localize them by themselves
    //
    
    //basic expression
    rTest=rBoolTest >> -(ztoken(TOKEN_KW_IF) >> rBoolTest >> ztoken(TOKEN_KW_ELSE) >> rTest) | rLambDef;
    //no-conditional expression. It is used for the definition of 'if'
    rTestNoCond=rBoolTest|rLambDefNoCond;
    //this is a typical rvalue
    rTestList=rTest%ztoken(TOKEN_OP_COMMA) >> -ztoken(TOKEN_OP_COMMA);
    //this is a typical lvalue
    rTestListStarExpr=(rTest|rStarExpr)%ztoken(TOKEN_OP_COMMA) >> -ztoken(TOKEN_OP_COMMA);
    //this can appear parenthesized to create a list or tuple
    rTestListComp=(rTest|rStarExpr) >> (rCompFor | *(ztoken(TOKEN_OP_COMMA) >> 
                        (rTest|rStarExpr)) >> -ztoken(TOKEN_OP_COMMA));

    ///////////////////////////////////////////////////////////////////////////////////////////////////////////////
    //  simple statements
    //
    //    these are not statements by themselves, but can be used in other constructions, and it is sometimes
    //    necessary to localize them by themselves
    //

    //this is an assignment or just an expression
    //the semantic action records the raw token range as an EXPR small statement
    rExprStmt=*ztokentype(TOKENTYPE_WHITESPACE) >> raw((rTestListStarExpr >> (ztokentype(TOKENTYPE_AUGASSIGNOP) >> (rYieldExpr|rTestList) |
                 *(ztoken(TOKEN_OP_EQUAL) >> (rYieldExpr|rTestListStarExpr)))) >> &(ztoken(TOKEN_OP_SEMICOLON)|ztoken(TOKEN_NEWLINE)))
                    ++[([this](const TTokenRange &pPair){mCurrentListStmt.mStmts.emplace_back(SSmallStmt::EXPR,pPair.first,pPair.second);})];
    //a shell pipeline used in statement position; recorded as a SHELL small statement
    rShellStmt=*ztokentype(TOKENTYPE_WHITESPACE) >> raw(rShellPipeline)++[([this](const TTokenRange &pPair){mCurrentListStmt.mStmts.emplace_back(SSmallStmt::SHELL,pPair.first,pPair.second);})];
    //these are simple one-liners that can be part of a small statement
    rDelStmt=ztoken(TOKEN_KW_DEL) >> rExprList;
    rReturnStmt=zcomptoken(TOKEN_KW_RETURN,COMP_SIMPLEKW) >> -rTestList;
    rRaiseStmt=ztoken(TOKEN_KW_RAISE) >> -(rTest >> -(ztoken(TOKEN_KW_FROM)>>rTest));
    rYieldExpr=ztoken(TOKEN_KW_YIELD) >> -rTestList;
    rFlowStmt=alternative(rReturnStmt,rRaiseStmt,rYieldExpr);
    rImportStmt=rImportName|rImportFrom;
    rGlobalStmt=ztoken(TOKEN_KW_GLOBAL)>>zcomptoken(TOKEN_IDENTIFIER,COMP_GLOBAL)%ztoken(TOKEN_OP_COMMA);
    rNonlocalStmt=ztoken(TOKEN_KW_NONLOCAL)>>zcomptoken(TOKEN_IDENTIFIER,COMP_GLOBAL)%ztoken(TOKEN_OP_COMMA);
    rAssertStmt=ztoken(TOKEN_KW_ASSERT) >> rTest >> -(ztoken(TOKEN_OP_COMMA)>>rTest);
    //small statement: either an expression statement or one of the one-liners above (recorded as OTHER)
    rSmallStmt=rExprStmt|raw(alternative(rExprStmt,rDelStmt,ztokentype(TOKENTYPE_TINYSTATEMENT),rFlowStmt,
                rImportStmt,rGlobalStmt,rNonlocalStmt,rAssertStmt) >> &(ztoken(TOKEN_OP_SEMICOLON)|ztoken(TOKEN_NEWLINE)))
                    ++[([this](const TTokenRange &pPair){mCurrentListStmt.mStmts.emplace_back(SSmallStmt::OTHER,pPair.first,pPair.second);})];
    //list statement: small or shell statements joined by list separators; resets
    //mCurrentListStmt before matching and pushes the completed one afterwards
    rListStmt=zz::ZEps()++[([this]{mCurrentListStmt=SListStmt();})] >>
                ((rSmallStmt|rShellStmt)%zattrtokentype(TOKENTYPE_LISTSEPARATOR)++[([this](const CToken &pToken){mCurrentListStmt.mSeparators.push_back(pToken.mToken);})])
                    ++[([this]{mListStmts.push_back(mCurrentListStmt);})];
    //just small statements separated by semicolon
    rSimpleStmt=sequence(rListStmt%ztoken(TOKEN_OP_SEMICOLON),-ztoken(TOKEN_OP_SEMICOLON),ztoken(TOKEN_NEWLINE));

    ///////////////////////////////////////////////////////////////////////////////////////////////////////////////
    //  compound statements
    //
    //    these are not statements by themselves, but can be used in other constructions, and it is sometimes
    //    necessary to localize them by themselves
    //

    rStmt=rCompoundStmt|rSimpleStmt;
    //this forms the body of a compound statement
    //hitting EOI inside a suite flags the line as incomplete (continuation prompt)
    rSuite=ztoken(TOKEN_OP_COLON) >> (rSimpleStmt | ztoken(TOKEN_NEWLINE) >> 
                (ztoken(TOKEN_EOI)++[([this]{mParserStatus=STATUS_EOIINSUITESTART;})] | 
                ztoken(TOKEN_INDENT) >> +rStmt >> (ztoken(TOKEN_EOI)++[([this]{mParserStatus=STATUS_EOIINSUITEEND;})]|ztoken(TOKEN_DEDENT))));

    //these can be included as part of some of the others
    rElseStmt=-(ztoken(TOKEN_KW_ELSE) >> rSuite);
    rFinallyStmt=ztoken(TOKEN_KW_FINALLY) >> rSuite;

    //the basic compound statements
    rIfStmt=sequence(ztoken(TOKEN_KW_IF),
                     rTest,
                     rSuite,
                     *sequence(ztoken(TOKEN_KW_ELIF),rTest,rSuite),
                     rElseStmt);
    rWhileStmt=sequence(ztoken(TOKEN_KW_WHILE),rTest,rSuite,rElseStmt);
    rForStmt=sequence(zcomptoken(TOKEN_KW_FOR,COMP_COMPKW),rExprList,ztoken(TOKEN_KW_IN),rTestList,rSuite,rElseStmt);
    rTryStmt=sequence(ztoken(TOKEN_KW_TRY),
                      rSuite,
                      sequence(+(rExceptClause >> rSuite),rElseStmt,-rFinallyStmt)|rFinallyStmt);
    rWithStmt=sequence(ztoken(TOKEN_KW_WITH),rWithItem,*(ztoken(TOKEN_OP_COMMA) >> rWithItem),rSuite);
    rFuncDef=ztoken(TOKEN_KW_DEF) >> ztoken(TOKEN_IDENTIFIER) >> rParameters >>
                -(ztoken(TOKEN_OP_RARROW) >> rTest) >> rSuite;
    rClassDef=sequence(ztoken(TOKEN_KW_CLASS),
                       ztoken(TOKEN_IDENTIFIER),
                       -sequence(ztoken(TOKEN_LEFT_PAREN),
                                 -rArgList,ztoken(TOKEN_RIGHT_PAREN)),
                       rSuite);
    //compound statement
    rCompoundStmt=alternative(rIfStmt,rWhileStmt,rForStmt,rTryStmt,rWithStmt,
                              *rDecorator >> (rFuncDef | rClassDef));
    

    //NOTE:added the initial indent token to allow the first line to start with some space... it shouldn't be harmful
    
    //top-level entry points: interactive line, whole file, and eval expression
    rSingleInput=alternative(ztoken(TOKEN_NEWLINE),rCompoundStmt >> -ztoken(TOKEN_NEWLINE),rSimpleStmt) >> ztoken(TOKEN_EOI);
    rFileInput=*(ztoken(TOKEN_NEWLINE) | rStmt) >> ztoken(TOKEN_EOI);
    rEvalInput=sequence(rTestList,*ztoken(TOKEN_NEWLINE),ztoken(TOKEN_EOI));


    
    ///////////////////////
    // shell expressions
    //
    rShellInput%=rShellPipeline >> *ztoken(TOKEN_NEWLINE) >> ztoken(TOKEN_EOI);
    
    //python subexpressions embedded in shell words: $name, $(testlist) and $[testlist]
    rShellSinglePythonSubExpr=ztoken(TOKEN_DOLLAR_IDENTIFIER)|sequence(ztoken(TOKEN_DOLLAR_PAREN),
                                     skipping(ztokentype(TOKENTYPE_ANYSPACE),rTestList),ztoken(TOKEN_RIGHT_PAREN));
    rShellMultiPythonSubExpr=rShellSinglePythonSubExpr|sequence(ztoken(TOKEN_DOLLAR_SQUARE),
                                     skipping(ztokentype(TOKENTYPE_ANYSPACE),rTestList),ztoken(TOKEN_RIGHT_SQUARE));
    rShellSingleWord=+raw(+zcomptokentype(TOKENTYPE_WORDPART|TOKENTYPE_COMMA,COMP_SHELL)|rShellSinglePythonSubExpr|
                            (ztoken(TOKEN_OP_DOLLAR)>>(ztoken(TOKEN_STRING_I)|ztoken(TOKEN_STRING_TI))))++[AAddWordPart()];
    rShellMultiWord=+(raw(+zcomptokentype(TOKENTYPE_WORDPART|TOKENTYPE_COMMA,COMP_SHELL)|rShellMultiPythonSubExpr|
                           (-ztoken(TOKEN_OP_DOLLAR)>>(ztoken(TOKEN_STRING_I)|ztoken(TOKEN_STRING_TI))))++[AAddWordPart()]|
                      rShellBraceExpr++[AAddWordPart()]);
    //same as rShellMultiWord but commas are excluded (used inside brace expansions)
    rShellMultiWordNoComma=+(raw(+zcomptokentype(TOKENTYPE_WORDPART,COMP_SHELL)|rShellMultiPythonSubExpr|
                           ztoken(TOKEN_STRING_I)|ztoken(TOKEN_STRING_TI))++[AAddWordPart()]|
                      rShellBraceExpr++[AAddWordPart()]);
    rShellBraceExpr=ztoken(TOKEN_LEFT_BRACE) >> rShellMultiWordNoComma++[AAddBracePart()]%ztoken(TOKEN_OP_COMMA) >> ztoken(TOKEN_RIGHT_BRACE);
    
    //redirection: optional fd number, redirect operator, then the target word
    rShellRedirection=(zz::lexeme((zattrtoken(TOKEN_INTEGER)|zz::attr(CToken()))>>zattrtokentype(TOKENTYPE_REDIRECTOP)) >> 
                       zz::lexeme(rShellSingleWord))++[ABuilder3<CRedirection>()];
    //command: one or more words (not starting a redirection), then any redirections
    rShellCmd=(+zz::lexeme(rShellMultiWord-(ztoken(TOKEN_INTEGER) >> 
                     ztokentype(TOKENTYPE_REDIRECTOP))))++[AMoveBuilder1<CCommand>()] >> *rShellRedirection++[AAdder()];
    //pipeline: optional leading '!', commands joined by pipeline separators,
    //optionally terminated by a pipeline finisher (e.g. background operator)
    rShellPipeline=(-ztoken(TOKEN_OP_EXCLAMATION))++[ABuilder0<CPipeline>()] >> (rShellCmd++[AAdder()]%
                zattrtokentype(TOKENTYPE_PIPELINESEPARATOR)++[ASeparatorAdder()]) >> -(zattrtokentype(TOKENTYPE_PIPELINEFINISHER)++[ASeparatorAdder()]);
}

//recursively replaces alias words in command position by their (already
//parsed) token expansions. pAlready carries the alias names expanded on the
//current path so that self-referential aliases cannot recurse forever
void CParser::ExpandAliases(std::vector<CToken> &pTokens,const std::set<std::wstring> &pAlready)
{
    bool lStart=true; //true while we are looking at the first word of a command
    
    for(int i=0;i<pTokens.size();i++)
    {
        //a pipeline separator starts a new command whose first word may be an alias
        if(pTokens[i].mToken&TOKENTYPE_PIPELINESEPARATOR)
        {
            lStart=true;
            continue;
        }
        if(pTokens[i].mToken&TOKENTYPE_WHITESPACE)
            continue;
        
        if(lStart&&pTokens[i].mToken&TOKENTYPE_WORDPART)
        {
            //find the full extent [i,j) of the word and assemble its text
            int j;
            for(j=i+1;j<pTokens.size()&&pTokens[j].mToken&TOKENTYPE_WORDPART;++j) {}
            
            std::wstring lID;
            for(int k=i;k<j;k++)
                lID.append(pTokens[k].mB,pTokens[k].mE);
            CAlias *lAlias=gInterpreter->GetAlias(lID);
            
            if(lAlias&&pAlready.find(lID)==pAlready.end()) //found and not already expanded
            {
                //expand the alias' own tokens first, with this alias marked as seen
                std::vector<CToken> lTokens=lAlias->mTokens;
                std::set<std::wstring> lAlready=pAlready;
                lAlready.insert(lID);
                ExpandAliases(lTokens,lAlready);
                
                //splice the expansion in place of the original word
                pTokens.erase(pTokens.begin()+i,pTokens.begin()+j);
                pTokens.insert(pTokens.begin()+i,lTokens.begin(),lTokens.end());
                //NOTE(review): for a word made of j-i>1 tokens this leaves i
                //j-i-1 positions before the end of the inserted expansion, so its
                //tail is scanned again (harmless while lStart is false, but worth
                //confirming for expansions that contain pipeline separators)
                i+=lTokens.size()-(j-i);
            }
        }
        lStart=false;
    }
}

//tokenizes a single line and parses it as a shell pipeline
//returns the parsed pipeline, or nullptr on tokenization/parse failure;
//the produced tokens are returned through pTokens in either case
PPipeline CParser::ParseShellLine(const std::wstring& pLine,std::vector<CToken> &pTokens)
{
    std::wstring::const_iterator lChar=pLine.begin();

    //any erroneous or incomplete tokenization means there is nothing to parse
    switch(mTokenizer.Tokenize(lChar,pLine.end(),pTokens))
    {
    case TOKENIZE_DEDENT_ERROR:
    case TOKENIZE_STRING_ERROR:
    case TOKENIZE_EOI_ENCLOSURE:
    case TOKENIZE_EOI_STRING:
    case TOKENIZE_EOI_LINECONT:
        return nullptr;
    default:
        break;
    }

    //replace alias words by their expansions before parsing
    ExpandAliases(pTokens);

    TTokenIt lB=pTokens.begin();
    TTokenIt lE=pTokens.end();

    mParserStatus=STATUS_OK;
    PPipeline lPipeline;
    bool lOK=zz::phrase_parse(lB,lE,rShellInput,ztokentype(TOKENTYPE_ANYSPACE),lPipeline);

    if(!lOK||lB!=lE)
    {
        //parse failure: dump the token stream for diagnosis
        mTokenizer.PrintTokens(pTokens.begin(),pTokens.end());
        return nullptr;
    }

    return lPipeline;
}

//tokenizes and parses one unit of input according to pRunMode (whole file,
//eval expression, or single interactive statement).
//on success, writes the translated python source into pParsedLine.
//when the input is incomplete, returns PARSE_INCOMPLETE and fills pIndent with
//the indentation levels to offer on the continuation prompt
EParseResult CParser::ParseLine(const std::wstring& pLine,std::wstring &pParsedLine,std::vector<int> &pIndent,
                                ERunMode pRunMode)
{
    std::wstring::const_iterator lI=pLine.begin();
    std::vector<CToken> lTokens;

    ETokenizeResult lTokenize=mTokenizer.Tokenize(lI,pLine.end(),lTokens);
    
    //map tokenizer results to parse results; EOI inside an enclosure, string or
    //line continuation means the user has more to type
    switch(lTokenize)
    {
    case TOKENIZE_DEDENT_ERROR:    
        return PARSE_DEDENT_ERROR;
    case TOKENIZE_STRING_ERROR:
        return PARSE_STRING_ERROR;
    case TOKENIZE_EOI_ENCLOSURE:
    case TOKENIZE_EOI_STRING:
    case TOKENIZE_EOI_LINECONT:
        pIndent=mTokenizer.GetIndentLevels();
        return PARSE_INCOMPLETE;
    case TOKENIZE_EMPTY:
        return PARSE_EMPTY;
    case TOKENIZE_OK:
    default:
        break;
    }
    
    TTokenIt lTI=lTokens.begin(),lTE=lTokens.end();

    mParserStatus=STATUS_OK;
    mListStmts.clear();
    bool lResult;
    if(pRunMode==RUN_FILE)
        lResult=zz::phrase_parse(lTI,lTE,rFileInput,ztokentype(TOKENTYPE_WHITESPACE));
    else if(pRunMode==RUN_EXPRESSION)
        lResult=zz::phrase_parse(lTI,lTE,rEvalInput,ztokentype(TOKENTYPE_WHITESPACE));
    else
        lResult=zz::phrase_parse(lTI,lTE,rSingleInput,ztokentype(TOKENTYPE_WHITESPACE));

    //the grammar flags these statuses when EOI is hit inside a suite:
    //at the start of a suite we suggest one extra indent level (3 spaces)
    if(mParserStatus==STATUS_EOIINSUITESTART)
    {
        pIndent=mTokenizer.GetIndentLevels();
        pIndent.push_back(mTokenizer.GetLastIndent()+3);
        return PARSE_INCOMPLETE;
    }
    else if(mParserStatus==STATUS_EOIINSUITEEND)
    {
        pIndent=mTokenizer.GetIndentLevels();
        return PARSE_INCOMPLETE;
    }

    if(lResult&&lTI==lTE)
    {
        //parsing was successful, so we need to generate a parsed statement, with the necessary changes
        pParsedLine.clear();
        pParsedLine.reserve(pLine.size()*2); //it can grow bigger... this looks efficient

        GenerateParsedLine(pParsedLine,lTokens);
        
        return PARSE_OK;
    }

    //parse failure: dump the token stream for diagnosis
    mTokenizer.PrintTokens(lTokens.begin(),lTokens.end());
    
    return PARSE_ERROR;
}

//tokenizes an alias' expansion text and validates that it forms a complete
//shell pipeline; the tokens are stored in the alias itself, with the trailing
//EOI token removed. Returns false on any tokenization or parse failure
bool CParser::ParseAlias(CAlias& pAlias)
{
    std::wstring::const_iterator lChar=pAlias.mExpansion.begin();

    switch(mTokenizer.Tokenize(lChar,pAlias.mExpansion.end(),pAlias.mTokens))
    {
    case TOKENIZE_DEDENT_ERROR:
    case TOKENIZE_STRING_ERROR:
    case TOKENIZE_EOI_ENCLOSURE:
    case TOKENIZE_EOI_STRING:
    case TOKENIZE_EOI_LINECONT:
        return false;
    default:
        break;
    }

    //a valid expansion ends in an explicit end-of-input token, which we drop
    if(pAlias.mTokens.back().mToken!=TOKEN_EOI)
        return false;
    pAlias.mTokens.pop_back();

    //now we check that it actually is a valid alias (should be a pipeline expression)
    TTokenIt lB=pAlias.mTokens.begin();
    TTokenIt lE=pAlias.mTokens.end();

    mParserStatus=STATUS_OK;
    bool lOK=zz::phrase_parse(lB,lE,rShellPipeline,ztokentype(TOKENTYPE_WHITESPACE));

    //only a full match counts
    return lOK&&lB==lE;
}

//partial parsing for completion: tokenizes [pB,pE), marks the token under the
//cursor, runs the parser to discover which completion context applies at the
//cursor, and fills pMatch with the word range(s) to complete.
//when the context allows both shell and python completion, a single word after
//a statement beginning keeps both; otherwise one of the two is chosen
void CParser::ParsePartial(std::wstring::const_iterator pB,std::wstring::const_iterator pE,CPartialMatch &pMatch)
{
    std::wstring::const_iterator lI=pB;

    mTokenizer.Tokenize(lI,pE,pMatch.mTokens);
    pMatch.mTokens.erase(pMatch.mTokens.end()-1); //erase TOKEN_EOI

    //if the last token is an alphanumeric one, consider it the cursor.
    if(!pMatch.mTokens.empty()&&pMatch.mTokens.back().mToken&TOKENTYPE_ALPHANUM)
    {
        pMatch.mTokens.back().mToken=TOKEN_CURSOR;
    }
    else //otherwise, add the cursor at the very end
    {
        pMatch.mTokens.emplace_back(TOKEN_CURSOR,pE,pE);
    }

    TTokenIt lCursor=pMatch.mTokens.end()-1;
    lCursor->mComplete=COMP_NONE;
    
    //run the parser to check for completability
    //the parser annotates the cursor token's mComplete field as a side effect
    TTokenIt lTB=pMatch.mTokens.begin(),lTE=pMatch.mTokens.end();
    zz::phrase_parse(lTB,lTE,rSingleInput,ztokentype(TOKENTYPE_WHITESPACE));
    ECompContext lContext=lCursor->mComplete;
    
    pMatch.mContext=COMP_NONE;

    if(lContext==COMP_NONE)
        return;
        
    //now, search backwards for the beginning of the statement
    TTokenIt lBeginning=pMatch.mTokens.end();
    
    while(lBeginning!=pMatch.mTokens.begin())
    {
        --lBeginning;
        if(lBeginning->mToken&TOKENTYPE_PARTIALBEGIN)
        {
            ++lBeginning;
            break;
        }
    }
    
    //skip whitespace
    while(lBeginning!=pMatch.mTokens.end()&&lBeginning->mToken&TOKENTYPE_WHITESPACE)
        ++lBeginning;
    

    //if the beginning is at the very end, do a mixed python/shell completion
    bool lShell=(lContext&COMP_SHELL);
    bool lPython=(lContext&COMP_PYTHON_ANY);

    if(lShell&&lPython) //we need to settle for one, unless we are immediately after a partial begin
    {
        //a statement consisting of a single word so far could still become either
        bool lSingleWord=true;
        
        for(auto i=lBeginning;i!=pMatch.mTokens.end();++i)
        {
            if(i->mToken&TOKENTYPE_WHITESPACE)
            {
                lSingleWord=false;
                break;
            }
        }
        
        if(lSingleWord) //in this case, take both
        {
            pMatch.mWordB=lBeginning->mB;
            pMatch.mWordE=lCursor->mE;
            pMatch.mPythonPart=std::wstring(lCursor->mB,lCursor->mE);
            pMatch.mPythonAdd=std::wstring(lBeginning->mB,lCursor->mB);
            pMatch.mWords.emplace_back(lBeginning,pMatch.mTokens.end());
            pMatch.mContext=lContext;
            return;
        }
        
        //else, decide which one to take
        //a leading keyword means this is a python statement, not a shell command
        if(lBeginning->mToken&(TOKENTYPE_SIMPLEKW|TOKENTYPE_COMPKW))
        {
            lShell=false;
        }
        else
        {
            lPython=false;
        }
    }
    
    if(lShell)
    {
        //we only want the current command, so check if there is a pipeline later on, and set that as the real beginning
        TTokenIt lPipeB=lBeginning;
        lBeginning=pMatch.mTokens.end();
        
        while(lBeginning!=lPipeB)
        {
            --lBeginning;
            if(lBeginning->mToken&TOKENTYPE_PIPELINESEPARATOR||lBeginning->mToken==TOKEN_OP_EXCLAMATION)
            {
                ++lBeginning;
                break;
            }
        }
        //again, skip whitespace
        while(lBeginning!=pMatch.mTokens.end()&&lBeginning->mToken&TOKENTYPE_WHITESPACE)
            ++lBeginning;
        
        //now, create some words... just skipping crap
        //each word is a maximal run of word-part (or cursor) tokens
        for(TTokenIt lB=lBeginning;lB!=pMatch.mTokens.end();++lB)
        {
            if(!(lB->mToken&(TOKENTYPE_WORDPART|TOKENTYPE_OTHERWORDPART)||lB->mToken==TOKEN_CURSOR)) continue;
            
            TTokenRange lWord;
            lWord.first=lB;
            for(++lB;lB!=pMatch.mTokens.end();++lB)
            {
                if(!(lB->mToken&(TOKENTYPE_WORDPART|TOKENTYPE_OTHERWORDPART)||lB->mToken==TOKEN_CURSOR)) break;
            }
            lWord.second=lB;
            pMatch.mWords.push_back(lWord);
            if(lB==pMatch.mTokens.end()) break;
        }
        //the cursor token guarantees the last word ends exactly at pE
        assert(!pMatch.mWords.empty()&&(pMatch.mWords.back().second-1)->mE==pE);

        pMatch.mWordB=pMatch.mWords.back().first->mB;
        pMatch.mWordE=(pMatch.mWords.back().second-1)->mE;
        pMatch.mContext=COMP_SHELL;
    }
    if(lPython)
    {
        pMatch.mWordB=lCursor->mB;
        pMatch.mWordE=lCursor->mE;
        pMatch.mPythonPart=std::wstring(lCursor->mB,lCursor->mE);
        pMatch.mPythonAdd=std::wstring();
        pMatch.mContext=lContext&COMP_PYTHON_ANY;
    }
}

//appends the characters in [pB,pE) to pDst, escaping single quotes,
//backslashes and newlines so the result can be embedded in a single-quoted
//python string literal
void CParser::AppendQuoted(std::wstring::const_iterator pB,std::wstring::const_iterator pE,std::wstring &pDst)
{
    for(std::wstring::const_iterator lC=pB;lC!=pE;++lC)
    {
        wchar_t lCh=*lC;
        if(lCh=='\'')
            pDst+=L"\\\'";
        else if(lCh=='\\')
            pDst+=L"\\\\";
        else if(lCh=='\n')
            pDst+=L"\\n";
        else
            pDst+=lCh;
    }
}

//appends the characters in [pB,pE) to pDst, escaping single quotes but
//passing existing backslash escapes through verbatim (backslash plus the
//following character are copied unchanged)
void CParser::AppendTransQuoted(std::wstring::const_iterator pB,std::wstring::const_iterator pE,std::wstring &pDst)
{
    for(std::wstring::const_iterator lI=pB;lI!=pE;++lI)
    {
        switch(*lI)
        {
        case '\'':
            pDst+=L"\\\'";
            break;
        case '\\':
            pDst+=*(lI++); //copy the backslash, then fall through to copy the escaped char
            assert(lI!=pE);
            if(lI==pE) //guard against a trailing lone backslash: without this,
                return; //release builds (assert disabled) would dereference pE
            //fall through
        default:
            pDst+=*lI;
        }
    }
}

//translates an inverted (backtick-like) string token into a python
//shellout('...') call, using triple quotes for triple-quoted tokens and
//stripping the token's own quote characters
void CParser::AppendInvertedString(const CToken &pToken,std::wstring &pDst)
{
    const bool lTriple=(pToken.mToken==TOKEN_STRING_TI);
    const int lNQuotes=lTriple?3:1;

    pDst+=L"shellout(";
    pDst+=lTriple?L"'''":L"'";
    //copy the string contents, keeping existing escapes intact
    AppendTransQuoted(pToken.mB+lNQuotes,pToken.mE-lNQuotes,pDst);
    pDst+=lTriple?L"''')":L"')";
}

//wraps the raw token text in a python shellglob(...) call
void CParser::AppendGlobString(const CToken &pToken,std::wstring &pDst)
{
    pDst.append(L"shellglob(");
    pDst.append(pToken.mB,pToken.mE);
    pDst.append(1,L')');
}

//wraps the raw token text in a python environ[...] lookup
void CParser::AppendEnvString(const CToken &pToken,std::wstring &pDst)
{
    pDst.append(L"environ[");
    pDst.append(pToken.mB,pToken.mE);
    pDst.append(1,L']');
}

//decides whether a small statement should be run as a shell pipeline rather
//than as python. Statements already classified during parsing (SHELL/OTHER)
//are answered directly; an ambiguous EXPR statement is treated as shell only
//if it parses as a pipeline whose first word names a builtin, an executable
//found in the path, or a defined alias
bool CParser::ShouldParseAsShell(const SSmallStmt& pS)
{
    if(pS.mType==SSmallStmt::SHELL) return true;
    if(pS.mType==SSmallStmt::OTHER) return false;
    
    TTokenIt lI=pS.mB;

    mParserStatus=STATUS_OK;
    PPipeline lPipeline;
    bool lResult=zz::phrase_parse(lI,pS.mE,rShellPipeline,ztokentype(TOKENTYPE_WHITESPACE),lPipeline);
    
    //must match the statement's whole token range to count as a pipeline
    if(!lResult||lI!=pS.mE)
        return false;
    
    assert(!lPipeline->mCommands.empty());
    PCommand lCmd=lPipeline->mCommands.front();
    assert(!lCmd->mParsedWords.empty());
    
    //only a plain first word can name a command
    if(lCmd->mParsedWords.front().GetType()!=CParsedWord::TYPE_NORMAL) return false;

    //this assumes that the tokens are part of a contiguous string. This is not always the case, but it will always
    //be the case here.
    TTokenRange lRange=lCmd->mParsedWords.front().GetTokenRange();

    std::wstring lWFirst(lRange.first->mB,(lRange.second-1)->mE);
    std::string lFirst=utf8(lWFirst);
    
    //accept if the word resolves to a builtin, a path executable or an alias
    if(gInterpreter->GetBuiltin(lFirst)) return true;
    if(!os::get_command(lFirst).empty()) return true;
    if(gInterpreter->GetAlias(lWFirst)) return true;

    return false;
}

//Appends the tokens in [pB,pE) to pDst as python source text, translating the
//shell-style string extensions into python calls: inverted-quote strings become
//shellout(...), and the g/e string prefixes become shellglob(...)/environ[...].
//pIndent is the current indentation level (3 spaces per level) and is updated in
//place as INDENT/DEDENT tokens are consumed, so consecutive calls share it.
void CParser::AppendPythonTokens(TTokenIt pB,TTokenIt pE,std::wstring &pDst,int &pIndent)
{
    //what kind of token was emitted last; adjacent strings need a '+' inserted
    //between them when at least one of them was rewritten into a function call,
    //and anything emitted right after a newline needs the indentation re-emitted
    enum ELast
    {
        NORMAL,
        STRING,
        SPECIAL_STRING,
        NEWLINE
    };
    
    ELast lLast=NORMAL;

    //pending one-character string prefix (g/e/s/l), consumed by the next string token
    wchar_t lPrefix=0;
    
    for(auto i=pB;i!=pE;++i)
    {
        switch(i->mToken)
        {
        case TOKEN_COMMENT:
            //comments are dropped from the generated python
            break;
        case TOKEN_STRING_S:
        case TOKEN_STRING_D:
        case TOKEN_STRING_TS:
        case TOKEN_STRING_TD:
            //plain quoted strings: python concatenates adjacent literals itself,
            //so '+' is only needed when the neighbour was rewritten into a call
            if(lLast==SPECIAL_STRING||(lPrefix&&lLast==STRING))
                pDst+=L'+';
            else if(lLast==NEWLINE)
                pDst.append(3*pIndent,L' ');
            if(lPrefix)
            {
                lLast=SPECIAL_STRING;
                if(lPrefix=='g')
                    AppendGlobString(*i,pDst);
                else //lPrefix=='e'
                    //NOTE(review): an 's' or 'l' prefix on a plain string also lands
                    //here and is treated as 'e' — presumably those prefixes are only
                    //meaningful on inverted strings; confirm against the tokenizer
                    AppendEnvString(*i,pDst);
                lPrefix=0;
            }
            else
            {
                lLast=STRING;
                pDst.append(i->mB,i->mE);
            }
            break;
        case TOKEN_STRING_I:
        case TOKEN_STRING_TI:
            //inverted-quote strings always become a shellout(...) call, so any
            //adjacent string needs an explicit '+'
            if(lLast==STRING||lLast==SPECIAL_STRING)
                pDst+=L'+';
            else if(lLast==NEWLINE)
                pDst.append(3*pIndent,L' ');
            lLast=SPECIAL_STRING;
            AppendInvertedString(*i,pDst);
            if(lPrefix)
            {
                //the s/l prefixes split the shell output into a list of words/lines
                if(lPrefix=='l')
                    pDst+=L".splitlines()";
                else
                    pDst+=L".split()";
                lPrefix=0;
            }
            break;
        case TOKEN_NEWLINE:
            //indentation is deferred until we know something follows the newline
            pDst+=L'\n';
            lLast=NEWLINE;
            break;
        case TOKEN_INDENT:
            ++pIndent;
            break;
        case TOKEN_DEDENT:
            --pIndent;
            break;
        case TOKEN_STRING_PREFIX:
            //remember a recognized prefix; any other prefix character is emitted
            //verbatim via the default case below
            if(*(i->mB)=='g'||*(i->mB)=='e'||*(i->mB)=='s'||*(i->mB)=='l')
            {
                lPrefix=*(i->mB);
                break;
            }
            //else... fall through
        default:
            //any other token is copied verbatim, re-indenting after a newline
            if(lLast==NEWLINE)
                pDst.append(3*pIndent,L' ');
            lLast=NORMAL;
            pDst.append(i->mB,i->mE);
        }
    }
    //if the range ended right after a newline, still emit the indentation so the
    //caller can append more code at the correct level
    if(lLast==NEWLINE)
        pDst.append(3*pIndent,L' ');
}

//Generates the python source for one logical line. Statement lists joined with
//shell separators (&&, ||) are rewritten into a python "if (... and/or ...):"
//guard around shell(...) calls; the final statement of each list is emitted
//either as another shell call or as plain python, depending on how it parses.
//pParsedLine receives the generated text; pTokens is the full token stream.
void CParser::GenerateParsedLine(std::wstring &pParsedLine,const std::vector<CToken> &pTokens)
{
    TTokenIt lLast=pTokens.begin();
    int lIndent=0;
    
    for(auto i=mListStmts.begin();i!=mListStmts.end();++i)
    {
        auto j=i->mStmts.begin();
        //emit everything until the start of the list statement
        AppendPythonTokens(lLast,j->mB,pParsedLine,lIndent);
        
        bool lIfEmitted=false;
        EToken lLastSeparator=TOKEN_INVALID;

        //opens the surrounding "if (" on the first statement, and joins later
        //statements with the python operator matching the shell separator
        auto lEmitJoin=[&]()
        {
            if(!lIfEmitted)
            {
                pParsedLine+=L"if (";
                lIfEmitted=true;
            }
            else if(lLastSeparator==TOKEN_OP_ANDAND)
            {
                pParsedLine+=L" and ";
            }
            else if(lLastSeparator==TOKEN_OP_OROR)
            {
                pParsedLine+=L" or ";
            }
        };

        //emits shell('<quoted statement text>')==0 for the current statement *j
        auto lEmitShellCall=[&]()
        {
            //it should never happen that the statement is empty
            assert(j->mB!=j->mE);

            pParsedLine+=L"shell('";
            AppendQuoted(j->mB->mB,(j->mE-1)->mE,pParsedLine);
            pParsedLine+=L"')==0";
        };
        
        for(auto s=i->mSeparators.begin();s!=i->mSeparators.end();++s,++j)
        {
            lEmitJoin();
            lEmitShellCall();
            lLastSeparator=*s;
        }

        //it should never happen that the statement is empty
        assert(j->mB!=j->mE);

        //this is the last one, so it might be a python expression. we have to check how it should be interpreted.
        if(ShouldParseAsShell(*j))
        {
            //even with no separators we need the if to suppress the return value
            lEmitJoin();
            lEmitShellCall();
            pParsedLine+=L"):\n";
            //indent and emit a pass so the if has a body
            pParsedLine.append((lIndent+1)*3,L' ');
            pParsedLine+=L"pass";
        }
        else
        {
            if(lIfEmitted)
            {
                //close the if; with || the python statement runs when the shell
                //chain failed, so the condition is compared against False
                if(lLastSeparator==TOKEN_OP_ANDAND)
                {
                    pParsedLine+=L"):\n";
                }
                else
                {
                    pParsedLine+=L")==False:\n";
                }
                //indent
                pParsedLine.append((lIndent+1)*3,L' ');
            }
            //emit the statement as plain python
            AppendPythonTokens(j->mB,j->mE,pParsedLine,lIndent);
        }

        lLast=j->mE;
    }
    
    //emit whatever follows the last list statement
    AppendPythonTokens(lLast,pTokens.end(),pParsedLine,lIndent);

    if(cmd_line().IsSet(sDebugParser))
        msg_info() << "parsed line:\n-----------\n" << pParsedLine << "-----------------\n";
}

/*namespace bush*/ } /*namespace buola*/ }
