#include "token.h"
#include "error.h"
#include "filesys.h"

/************************************************************************
 * TToken implementation                                                *
 ************************************************************************
 *
 * TToken is simply the low-level representation of a token, featuring:
 *     Text    - a pointer to the start of the token in a text image of the file.
 *     TextLen - the length in bytes of same.
 *     Type    - the enumerated type of the token (e.g., TToken::IDENT).
 */



/* Slice() - return the token slice covering the named, already-tokenized file.
 *
 * Only valid once tokenizing is complete (TokensFrozen); the file must
 * have been tokenized, or the assert fires.
 */
TTokenSlice TTokens::Slice(string Filename)
    {
    assert(TokensFrozen);
    for(auto const& File : Files)
        {
        if(File->Name != Filename)
            continue;
        return TTokenSlice(File->Begin, File->End, Tokens);
        }
    assert(false);
    // unreachable; the recursion merely quiets "no return value" warnings
    return Slice("");
    }


#if 0
bool TToken::operator==(std:initializer_list Compare)
    {
    
    }
#endif

/* Unquote() - return the text of a QUOTED token with the surrounding
 * quote characters stripped, as a NUL-terminated C string.
 *
 * NOTE(review): the buffer comes from new[] but is owned by a
 * unique_ptr<char>, whose deleter calls delete rather than delete[] --
 * undefined behavior on destruction.  The declared return type should
 * be unique_ptr<char[]>; left unchanged here because the signature is
 * fixed by the header.  Confirm and fix in token.h.
 */
unique_ptr<char> TToken::Unquote() const
    {
    assert(Type == TToken::QUOTED);

    // TextLen covers both quote characters, so the payload is TextLen-2
    // bytes; allocate one extra for the terminating NUL.
    unique_ptr<char> Result(new char[TextLen-1]);
    strncpy(Result.get(), Text+1, TextLen-2);
    // strncpy copies exactly TextLen-2 bytes here (the source has at
    // least that many non-NUL characters before the closing quote), so
    // it does NOT terminate the string -- do it explicitly.
    Result.get()[TextLen-2] = '\0';
    return Result;
    }

/* Printable() - render the token's text with control and non-ASCII
 * bytes escaped (\b \f \n \r \t, otherwise \xHH) so it can be shown
 * safely in diagnostics.
 */
string TToken::Printable() const
    {
    string      Result;
    const char* Rover = Text;

    for(int iChar=0; iChar < TextLen; ++iChar)
        {
        char    Char = *Rover++;
        if(Char == '\b')
            Result  += "\\b";
        else if(Char == '\f')
            Result  += "\\f";
        else if(Char == '\n')
            Result  += "\\n";
        else if(Char == '\r')
            Result  += "\\r";
        else if(Char == '\t')
            Result  += "\\t";
        else if(Char < ' ' || Char > '~')
            {
            char    Hex[16];
            // snprintf rather than sprintf: bounded, cannot overflow Hex.
            // '& 0x00FF' keeps a high-bit (possibly negative) char from
            // printing as a sign-extended value.
            snprintf(Hex, sizeof Hex, "\\x%02X", Char & 0x00FF);
            Result  += Hex;
            }
        else
            Result.push_back(Char);
        }
    return Result;
    }

// The distinguished "no token" value; recognized by IsNull().
TToken TToken::Null = { nullptr, 0, TToken::NOTUSED };

// IsNull() - true if this token is the TToken::Null sentinel.
// NOTE(review): compares Type against 0 while Null is initialized with
// TToken::NOTUSED -- this assumes NOTUSED == 0; confirm against the
// enum declaration in token.h.
bool    TToken::IsNull()
    {
    return Text == nullptr && TextLen == 0 && Type == 0;
    }

/************************************************************************
 * TTokens implementation                                               *
 ************************************************************************
 *
 * TTokens contains everything related to the files that have been
 * tokenized.
 */

/* GetFile() - return the file record for the named file.
 *
 * Returns a default-constructed TFileTokens if the name is unknown;
 * if duplicate names exist, the last matching entry wins.
 */
TFileTokens TTokens::GetFile(string Filename)
    {
    TFileTokens Match;

    for(auto const& File : Files)
        {
        if(File->Name != Filename)
            continue;
        Match = *File;      // later entries override earlier ones
        }
    return Match;
    }
/* GetFile() - return the file record whose token range contains TokenId.
 *
 * File->End is one greater than the file's last token ID (see the
 * comment in Tokenize), so the containment test is Begin <= TokenId < End.
 * The previous test (f->End >= TokenId) was off by one and could match
 * a TokenId equal to a file's End.  Returns a default-constructed
 * TFileTokens if no file contains TokenId.
 */
TFileTokens TTokens::GetFile(int TokenId)
    {
    TFileTokens Result;

    for(auto const & f : Files)
        if(f->Begin <= TokenId && TokenId < f->End)
            Result  = *f;
    return Result;
    }

/* TLexer - stateful matcher that extracts one token at a time from raw text.
 *
 * MatchNext() dispatches through ContextMatch, a member-function pointer
 * selecting the active lexing context: PrimaryMatch for normal input,
 * OperatorMatch after a %left/%right/%nonassoc directive (where arbitrary
 * operator spellings are expected).  MatchDirective switches into operator
 * context on those directives; a NEWLINE in operator context switches back.
 */
class   TLexer
    {
public:
    TLexer() {ContextMatch = &TLexer::PrimaryMatch;};
    // MatchNext() - return the next token starting at Rover, per current context.
    TToken  MatchNext(const char* Rover) { return (this->*ContextMatch)(Rover); }
private:
    TToken  PrimaryMatch(const char* Rover);            // default context
    TToken  OperatorMatch(const char* Rover);           // after %left/%right/%nonassoc
    TToken (TLexer::*ContextMatch)(const char* Rover);  // active context matcher
    TToken  MatchWhite(const char* Rover);              // helpers used by the matchers
    TToken  MatchDirective(const char* Rover);
    TToken  MatchQuote(const char* Rover, unsigned char QChar);
    TToken  MatchComment(const char* Rover);
    };

/* MatchWhite() - extend a whitespace token.
 *
 * On entry the caller has already consumed one whitespace character;
 * Rover points just past it (hence Text = Rover-1, TextLen = 1).
 * Absorb any further blanks, tabs, and formfeeds.
 */
TToken  TLexer::MatchWhite(const char* Rover)
    {
    TToken  Token{Rover-1, 1, TToken::WHITESPACE};

    for(unsigned char Next = *Rover; Next == ' ' || Next == '\t' || Next == '\f'; Next = *++Rover)
        ++Token.TextLen;
    return Token;
    }
/* MatchComment() - match a comment token.
 *
 * On entry the caller has consumed the leading '/'; Rover points at the
 * second character, which PrimaryMatch guarantees is '/' (line comment)
 * or '*' (block comment).  Text backs up one to cover the '/' already
 * eaten, and TextLen starts at 1 for it.  An unterminated block comment
 * stays BADCOMMENT; a terminated one becomes MCOMMENT.
 */
TToken  TLexer::MatchComment(const char* Rover)
    {
    TToken          Token   = {Rover-1, 1, TToken::COMMENT};
    unsigned char   Char    = *Rover++;

    if(Char == '/')
        // line comment: absorb everything up to (not including) the newline
        while(Char && Char != '\n')
            ++Token.TextLen, Char = *Rover++;
    else
        {
        // block comment: assume unterminated until the closing star-slash shows up
        Token.Type  = TToken::BADCOMMENT;
        ++Token.TextLen, Char  = *Rover++;
        while(Char && !(Char == '*' && *Rover == '/'))
            ++Token.TextLen, Char = *Rover++;
        if(Char)    // loop stopped on the terminator, not on NUL
            {
            Token.Type      = TToken::MCOMMENT;
            Token.TextLen  += 2;        // count the closing '*' and '/'
            }
        }

    return Token;
    }

/* MatchQuote() - match a quoted literal.
 *
 * On entry the caller has consumed the opening quote (QChar); Rover
 * points at the first content character.  A doubled quote character
 * stands for a single literal quote.  The token stays BADQUOTE if the
 * line or file ends before the closing quote is seen.
 *
 * Fix: the scan must stop at the closing quote.  Previously the QUOTED
 * branch did ++TextLen (double-counting the close) and the loop kept
 * consuming up to the end of the line, so a quoted token swallowed any
 * trailing text and overstated its length.
 */
TToken  TLexer::MatchQuote(const char* Rover, unsigned char QChar)
    {
    TToken          Token   = {Rover-1, 1, TToken::BADQUOTE};
    unsigned char   Char    = *Rover++;

    while(!(Char == '\0' || Char == '\n'))
        {
        ++Token.TextLen;
        if(Char == QChar)
            {
            if(*Rover == QChar)                 // doubled quote: one literal quote char
                ++Token.TextLen, Char = *Rover++;
            else
                {
                Token.Type = TToken::QUOTED;    // proper close: token complete
                break;
                }
            }
        Char    = *Rover++;
        }

    return Token;
    }


// Anything with high bit on is UTF-8 and we will treat as alpha!
// Underscore also counts; digits do not (callers accept those separately).
static int IsAlpha(unsigned char Char)
    {
    if(Char == '_' || Char > 127)
        return 1;
    unsigned Lower = Char | ' ';        // fold ASCII letters to lower case
    return Lower >= 'a' && Lower <= 'z';
    }
/* MatchDirective() - match a token introduced by '%'.
 *
 * On entry the caller (PrimaryMatch) has consumed the '%'; Rover points
 * at the character after it, and TextLen==1 already counts the '%'.
 * Recognizes:
 *     %%          - SECTION divider
 *     %{ ... %}   - CODE block (BADCODE if the closing "%}" is missing)
 *     %word       - directive keyword: %token %test %left %right
 *                   %nonassoc; any other word is UNKDIR
 * Any other character after '%' leaves the token ILLEGAL.  The
 * associativity directives also switch the lexer into operator context.
 */
TToken  TLexer::MatchDirective(const char* Rover)
    {
    TToken          Token   = {Rover-1, 1, TToken::ILLEGAL};
    unsigned char   Char    = *Rover;

    if(Char == '%')
        ++Token.TextLen, Token.Type = TToken::SECTION;
    // this should NOT vary by locale!
    else if(Char == '{')
        {
        Token.Type      = TToken::BADCODE;     // assume it won't end well
        ++Token.TextLen, Char = *++Rover;
        // absorb everything up to the '%' of a closing "%}"
        while(Char && !(Char == '%' && Rover[1] == '}'))
            ++Token.TextLen, Char = *++Rover;
        if(Char == '%')
            {
            ++Token.TextLen;
            if(Rover[1] == '}')               // if well-terminated after all
                ++Token.TextLen, Token.Type = TToken::CODE;
            }
        }
    else if(IsAlpha(Char))
        {
        // a named directive: collect the alphabetic word, then look it up
        // (TextLen includes the '%', hence the lengths 6/5/5/6/9 below)
        Token.Type  = TToken::UNKDIR;
        while(IsAlpha(Char))
            ++Token.TextLen, Char    = *++Rover;
        if(Token.TextLen == 6 && !strncmp(Token.Text, "%token", 6))
            Token.Type  = TToken::TOKEN;
        else if(Token.TextLen == 5 && !strncmp(Token.Text, "%test", 5))
            Token.Type  = TToken::TEST;
        else if(Token.TextLen == 5 && !strncmp(Token.Text, "%left", 5))
            Token.Type  = TToken::LEFT;
        else if(Token.TextLen == 6 && !strncmp(Token.Text, "%right", 6))
            Token.Type  = TToken::RIGHT;
        else if(Token.TextLen == 9 && !strncmp(Token.Text, "%nonassoc", 9))
            Token.Type  = TToken::NONASSOC;
        // associativity directives change what subsequent tokens look like
        if(Token.Type == TToken::LEFT || Token.Type == TToken::RIGHT || Token.Type == TToken::NONASSOC)
            ContextMatch    = &TLexer::OperatorMatch;
        }
    if(Token.Type == TToken::UNKDIR)
        DUMP("unknown directive\n");

    return Token;
    }

/* PrimaryMatch() - the default lexing context: classify the token
 * starting at Rover.
 *
 * Single-character tokens (newline, '|', ':', ';') are typed directly;
 * whitespace, directives, comments, quotes, and identifiers are handed
 * to their helpers.  Anything unrecognized is ILLEGAL.
 *
 * Fixes relative to the previous version:
 *   - a DOTIDENT ('.name') no longer yields a zero-length token: the
 *     old unconditional --TextLen discarded the count for '.', and the
 *     loop below never runs for '.' (IsAlpha('.') is false), so TextLen
 *     came out 0 and tripped the TextLen > 0 assert in Tokenize;
 *   - removed an unreachable duplicate quote branch ('\'' || '\"' is
 *     the same condition as the earlier '\'' || '"').
 */
TToken TLexer::PrimaryMatch(const char* Rover)
    {
    TToken          Token;
    unsigned char   Char;

    Token.Text      = Rover;
    Char            = *Rover++;
    Token.TextLen   = 1;            // we already ate one character
    if(Char == ' ' || Char == '\t' || Char == '\f')
        Token   = MatchWhite(Rover);
    else if(Char == '\n')
        Token.Type  = TToken::NEWLINE;
    else if(Char == '|')
        Token.Type  = TToken::ORBAR;
    else if(Char == ':')
        Token.Type  = TToken::COLON;
    else if(Char == ';')
        Token.Type  = TToken::SEMICOLON;
    else if(Char == '%')
        Token  = MatchDirective(Rover);
    else if(Char == '/' && (*Rover == '/' || *Rover == '*'))
        Token   = MatchComment(Rover);
    else if(Char == '\'' || Char == '"')
        Token   = MatchQuote(Rover, Char);
    else if(IsAlpha(Char) || (Char == '.' && IsAlpha(*Rover)))
        {
        if(Char == '.')
            {
            Token.Type  = TToken::DOTIDENT;
            Char        = *Rover++;     // TextLen==1 already counts the '.'
            }
        else
            {
            Token.Type  = TToken::IDENT;
            --Token.TextLen;            // the loop below recounts the first char
            }
        while(IsAlpha(Char) || (Char >= '0' && Char <= '9'))
            ++Token.TextLen, Char = *Rover++;
        }
    else
        Token.Type  = TToken::ILLEGAL;
    return Token;
    }

/* OperatorMatch() - alternate context matcher, active after a
 * %left/%right/%nonassoc directive.
 *
 * Whitespace and newlines are handled as usual (a NEWLINE restores the
 * primary context); anything else up to the next whitespace is taken as
 * a single OPERATOR token, with detailed validation left to the caller.
 *
 * Fix: the operator loop never incremented Token.TextLen, so every
 * multi-character operator came back with TextLen 1 and was re-lexed
 * one character at a time.  The loop now counts each additional
 * character, matching the "++Token.TextLen," idiom of the other
 * matchers.
 */
TToken TLexer::OperatorMatch(const char* Rover)
    {
    TToken          Token;
    unsigned char   Char;

    Token.Text      = Rover;
    Char            = *Rover++;
    Token.TextLen   = 1;            // we already ate one character
    if(Char == ' ' || Char == '\t' || Char == '\f')
        Token   = MatchWhite(Rover);
    else if(Char == '\n')
        {
        Token.Type      = TToken::NEWLINE;
        ContextMatch    = &TLexer::PrimaryMatch;    // newline ends operator context
        }
    else    /* else, anything up to next whitespace looks like an operator token */
        {   /* note that someone else will do more detailed checking */
        Token.Type  = TToken::OPERATOR;
        Char = *Rover++;                            // first char already counted
        while(Char && !strchr(" \t\f\r\n", Char))
            ++Token.TextLen, Char = *Rover++;
        }
    return Token;
    }

/* GetLine() - identify token range of line containing given token.
 *
 * Although there is a token for newlines (TToken::NEWLINE), some
 * tokens (such as TToken::COMMENT) subsume multiple newlines. So,
 * we can't use just the token stream to count newlines, but rather
 * have to go back to the original text.
 *
 * TargetId - ID of the token whose source line is wanted.
 *
 * Returns a TLineTokens carrying the file info (copied into its
 * TFileTokens base), the 1-based LineNumber, and the Start/Stop token
 * range of the line containing TargetId.
 */
TLineTokens TTokens::GetLine(int TargetId)
    {
    TLineTokens LineTokens;
    int&        LineNumber{LineTokens.LineNumber};      // alias into the result

    DUMPVERBOSE("GetLine(TargetId=%d)\n", TargetId);
    assert(Files.size() > 0);
    // first locate the correct file
    TFileTokens FileTokens = GetFile(TargetId);
    LineTokens.TFileTokens::operator=(FileTokens);      // copy file info into the base
    LineNumber  = 1;
    int iToken =     LineTokens.Start = FileTokens.Begin;
    // for each token in the file: advance Start to the token after the
    // most recent NEWLINE, and count newlines embedded in raw token text
    // (multi-line comments contain '\n's without NEWLINE tokens)
    for(auto Token : Slice(FileTokens))
        if(iToken == TargetId)
            break;
        else
            {
            ++iToken;
            if(Token.Type == TToken::NEWLINE)
                LineTokens.Start = iToken;
            // scan this token's text for newlines, but only within TextLen
            for(const char* Rover=Token.Text; (Rover=strchr(Rover, '\n'))!= nullptr; ++Rover)
                if(Rover - Token.Text >= Token.TextLen)
                    break;
                else
                    ++LineNumber;
            }
    LineTokens.Stop = iToken;
    // extend Stop forward to the end of the line (NEWLINE) or file (TKEOF)
    TToken Token    = Get(iToken);
    while(Token.Type != TToken::NEWLINE && Token.Type != TToken::TKEOF)
        {
        ++LineTokens.Stop;
        Token   = Get(++iToken);
        }

#if 0
    // NOTE(review): dead earlier draft (would not compile: duplicate
    // iToken declarations, undefined Target/FoundTokenLine); kept only
    // for reference.
    for(auto const & f : Files)
        if(f->Begin <= TargetId && f->End >= TargetId)
            {   // then locate the correct newline
            TFileTokens*    Base = &LineTokens;

            *Base   = *f.get();     // copy over file info;
            // count newlines in raw text until we get to target token
            int     iToken      = f->Begin;
            const char* Rover   = Tokens[iToken].Text;
            LineNumber          = 1;
            while(*Rover)
                {
                if(Rover >= Target.Text)
                    break;
                if(*Rover == '\n')
                    ++LineNumber;
                ++Rover;
                }

            int     iToken;
            LineNumber          = 1;
            LineTokens.Start    = f->Begin;
            for(iToken=f->Begin; iToken < TargetId; ++iToken)
                if(Tokens[iToken].Type == TToken::NEWLINE)
                    {
                    ++LineNumber;
                    LineTokens.Start    = iToken+1;
                    }
            // still have to find end of line (or EOF)
            while(Tokens[iToken].Type != TToken::NEWLINE)
                if(++iToken >= f->End)
                    break;
            LineTokens.Stop = iToken;
            FoundTokenLine   = true;
            }
    assert(FoundTokenLine);
#endif
    DUMPVERBOSE("GetLine(TargetId=%d) returns %d-%d\n", TargetId, LineTokens.Start, LineTokens.Stop);
    return LineTokens;
    }


/* Tokenize() - lex the named file and append its tokens to Tokens.
 *
 * Filename       - file to read and tokenize.
 * IncludeTokenId - token ID of the directive that included this file
 *                  (recorded in the file record).
 *
 * Appends one TToken per lexeme, then a zero-length TKEOF token, and
 * records the file's token range [Begin, End) in Files.  Everything
 * after the second %% section divider is absorbed as one zero-length
 * PROGRAM token.  Any lexical failure (illegal token, unknown
 * directive, unterminated code block or quote, over-long token) stops
 * the scan and is reported at the bottom.
 */
void TTokens::Tokenize(string Filename, int IncludeTokenId)
    {
    TTokenIndex TokenId;
    TLexer      Lexer;
    TToken      Token { nullptr };
    int         SectionCount    = 0;

    DUMPVERBOSE("Tokenize(%s, %d) %d tokens\n", Filename.c_str(), IncludeTokenId, Tokens.size());
    assert(!TokensFrozen);
    auto ReadFile   = FileNew();
    if(!ReadFile->OpenRead(Filename))
        Error("CANT_OPEN_FILE_FOR_READING",
            "Filename", Filename,
            "Code",     errno,
            "Error",    strerror(errno)
            );
    unique_ptr<TTokenizedFile>   File(new TTokenizedFile(ReadFile->Load()));

    TokenId = File->Begin     = Tokens.size();
    File->Include   = IncludeTokenId;
    File->Name      = Filename;

    const char* Rover           = File->Data.get();

    bool    Fail = false;
    // Fix: this loop previously declared its own 'auto Rover', shadowing
    // the outer one, so the TKEOF token's Text below pointed at the start
    // of the file instead of the end of the scanned text.
    for(; *Rover && !Fail; ++TokenId)
        {
        Token   = Lexer.MatchNext(Rover);
        if(Token.Type == TToken::UNKDIR)
            DUMP("unknown directive in Tokenize\n");

        Tokens.push_back(Token);
        assert(Token.TextLen > 0);
        Rover += Token.TextLen;
        if(SectionCount >= 2)       // if rover points at user's program text
            {
            // everything remaining is the user's program: one zero-length marker
            Token.Type      = TToken::PROGRAM;
            Token.Text      = Rover;
            Token.TextLen   = 0;    // allow it to be bigger than unsigned short
            Tokens.push_back(Token);
            break;
            }
        if(Token.TextLen >= TToken::MAXLEN)
            {
            // Fix: treat an over-long token as a failure like the cases
            // below; previously this break left Fail false, so the EOF
            // push overwrote Token.TextLen with 0 and the TOO_LONG error
            // at the bottom could never fire.
            Fail    = true;
            break;
            }
        else if(
                (Token.Type == TToken::TOOLONG) ||
                (Token.Type == TToken::ILLEGAL) ||
                (Token.Type == TToken::UNKDIR) ||
                (Token.Type == TToken::BADCODE) ||
                (Token.Type == TToken::BADQUOTE)
               )
            {
            Fail    = true;
            break;
            }
        else if(Token.Type == TToken::SECTION)
            ++SectionCount;
        }
    if(!Fail)
        {
        // append the zero-length end-of-file marker
        Token.Text      = Rover;
        Token.TextLen   = 0;
        Token.Type      = TToken::TKEOF;
        Tokens.push_back(Token);
        }

    File->End   = Tokens.size();    // one greater than last token ID
    Files.push_back(std::move(File));

    if(Token.TextLen >= TToken::MAXLEN)
        Error("TOO_LONG",
            "Token",    LastTokenId(),
            "MaxLen",   TToken::MAXLEN-1
            );
    switch(Token.Type)
        {
        // NOTE(review): assumes Error() does not return; otherwise the
        // ILLEGAL case would fall through into UNKDIR -- confirm.
        case TToken::ILLEGAL   :
            Error("BAD_TOKEN", "Token", TokenId);
        case TToken::UNKDIR    :
            Error("DIRECTIVE_UNKNOWN", "Directive", TokenId);
        case TToken::BADCODE   :
        case TToken::BADQUOTE  : 
            fprintf(stdout, "error token='%.*s'\n", Token.TextLen, Token.Text);
            break;
        }
    DUMPVERBOSE("Tokenize(%s, %d) returns (now %d tokens)\n", Filename.c_str(), IncludeTokenId, Tokens.size());
    }
