/* lex.cpp - implement lexing.
 *
 * Lex acts like a global singleton that retains in-memory copies
 * of every grammar file it loads, tokenized by LexFile::Tokenize().
 ****/

#include "lex.h"
#include <iostream>
#include <fstream>
#include <regex>
#include <stack>

// keep constants in private namespace
// (tuning knobs for the file-reading code in FileLoad(), below)
namespace
    {
    const size_t    IO_SIZE         = (1024*16);        // grow by this much, as needed
    const size_t    FIRST_READ      = (IO_SIZE * 16);   // size of our (big!) first read attempt
    const long      MAX_FILE_SIZE   = (1024*1024*8);    // I doubt we could handle 8MB worth of grammar!
    }

/* Unquote() - strip the surrounding quote characters from a QUOTED token.
 *
 * Returns a heap-allocated, NUL-terminated copy of the token text minus
 * its first and last characters.  Doubled embedded quotes (the escape
 * the lexer accepts) are NOT collapsed here.
 *
 * NOTE(review): the declared return type wraps new[] memory in a
 * unique_ptr<char>, whose deleter is delete rather than delete[] --
 * formally UB; consider changing the declaration to unique_ptr<char[]>.
 */
unique_ptr<char> TToken::Unquote() const
    {
    assert(Type == Lex::QUOTED);
    assert(TextLen >= 2);               // must at least hold the two quotes

    unique_ptr<char> Result(new char[TextLen-1]);
    strncpy(Result.get(), Text+1, TextLen-2);
    Result.get()[TextLen-2] = '\0';     // strncpy does not terminate a max-length copy
    return Result;
    }

// The canonical "no token" value: every field zeroed.
TToken TToken::Null = { nullptr, 0, 0 };

// True exactly when this token equals TToken::Null.
int     TToken::IsNull()
    {
    if(Text != nullptr)
        return 0;
    return TextLen == 0 && Type == 0;
    }


/* LexFile - one loaded grammar file.
 *
 * Path is remembered as Filename, Text (ownership transferred in)
 * becomes FileText, and FromInclude_ records the token that caused the
 * load (presumably TToken::Null for the top-level file -- NOTE(review):
 * confirm against callers).
 */
LexFile::LexFile(const char* Path, TFileChars Text, TToken FromInclude_)
    :   Filename(Path), FileText(std::move(Text)), ErrorCount(0), FromInclude(FromInclude_)
    {
    }

// defaults do all the work
LexFile::~LexFile()
    {
    }



/* TLexer - small state machine that matches one token at a time.
 *
 * MatchNext() dispatches through ContextMatch, which normally points at
 * PrimaryMatch; after a %left/%right/%nonassoc directive it is switched
 * to OperatorMatch until the next newline (see MatchDirective and
 * OperatorMatch below).
 */
class   TLexer
    {
public:
    TLexer() {ContextMatch = &TLexer::PrimaryMatch;};
    // Match one token starting at Rover, using the current context.
    TToken  MatchNext(const char* Rover) { return (this->*ContextMatch)(Rover); }
private:
    TToken  PrimaryMatch(const char* Rover);            // normal grammar context
    TToken  OperatorMatch(const char* Rover);           // after %left/%right/%nonassoc
    TToken (TLexer::*ContextMatch)(const char* Rover);  // current match strategy
    TToken  MatchWhite(const char* Rover);              // run of blanks/tabs/form feeds
    TToken  MatchDirective(const char* Rover);          // '%'-introduced constructs
    TToken  MatchQuote(const char* Rover, unsigned char QChar);     // 'x' or "x" literals
    TToken  MatchComment(const char* Rover);            // line and block comments
    };

/* MatchWhite - extend a whitespace token over a run of blanks.
 *
 * The caller has already consumed one whitespace character at Rover-1;
 * the returned token covers it plus any following blanks/tabs/form feeds.
 */
TToken  TLexer::MatchWhite(const char* Rover)
    {
    TToken Token = {Rover-1, 1, Lex::WHITESPACE};

    for(unsigned char Next = *Rover; Next == ' ' || Next == '\t' || Next == '\f'; Next = *++Rover)
        ++Token.TextLen;
    return Token;
    }
/* MatchComment - match a // line comment or a C-style block comment.
 *
 * On entry the caller has consumed the leading '/' (at Rover-1) and
 * guaranteed that *Rover is '/' or '*'.  A block comment that hits end
 * of input before its closing "* /" stays Lex::BADCOMMENT.
 */
TToken  TLexer::MatchComment(const char* Rover)
    {
    TToken          Token   = {Rover-1, 1, Lex::COMMENT};
    unsigned char   Char    = *Rover++;

    if(Char == '/')
        // line comment: runs to (but not including) the newline or NUL
        while(Char && Char != '\n')
            ++Token.TextLen, Char = *Rover++;
    else
        {
        Token.Type  = Lex::BADCOMMENT;      // assume it won't terminate
        ++Token.TextLen, Char  = *Rover++;  // count the '*' we just saw
        // scan until the '*' of "*/" or end of input
        while(Char && !(Char == '*' && *Rover == '/'))
            ++Token.TextLen, Char = *Rover++;
        if(Char)
            {
            // well terminated: include the two closing characters
            Token.Type      = Lex::MCOMMENT;
            Token.TextLen  += 2;
            }
        }

    return Token;
    }

/* MatchQuote - match a quoted literal delimited by QChar.
 *
 * The caller has already consumed the opening quote at Rover-1.  A
 * doubled quote character inside the literal is the escape for one
 * literal quote.  Quotes may not span lines: the token stays
 * Lex::BADQUOTE unless a proper closing quote appears before the end of
 * the line (or input).
 */
TToken  TLexer::MatchQuote(const char* Rover, unsigned char QChar)
    {
    TToken          Token   = {Rover-1, 1, Lex::BADQUOTE};
    unsigned char   Char    = *Rover++;

    while(!(Char == '\0' || Char == '\n'))
        {
        ++Token.TextLen;
        if(Char == QChar)
            {
            if(*Rover == QChar)                 // doubled quote: escaped, keep scanning
                ++Token.TextLen, Char = *Rover++;
            else
                {
                // Proper closing quote: it is already counted in TextLen,
                // so stop here.  (Without this break the loop kept going
                // and swallowed the rest of the line into the token.)
                Token.Type = Lex::QUOTED;
                break;
                }
            }
        Char    = *Rover++;
        }

    return Token;
    }


// Anything with high bit on is UTF-8 and we will treat as alpha!
static int IsAlpha(unsigned char Char)
    {
    Char   |= ' ';
    return (Char >= 'a' && Char <= 'z') || Char > 127;
    }
/* MatchDirective - match a '%'-introduced construct.
 *
 * The caller has consumed the '%' (at Rover-1); *Rover is the character
 * after it.  Produces:
 *   %%           Lex::SECTION
 *   %{ ... %}    Lex::CODE (Lex::BADCODE when unterminated)
 *   %name        Lex::TOKEN / LEFT / RIGHT / NONASSOC, else Lex::UNKDIR
 *   otherwise    Lex::ILLEGAL
 * The three precedence directives also flip ContextMatch so the operator
 * names that follow lex as Lex::OPERATOR tokens.
 */
TToken  TLexer::MatchDirective(const char* Rover)
    {
    TToken          Token   = {Rover-1, 1, Lex::ILLEGAL};
    unsigned char   Char    = *Rover;

    if(Char == '%')
        ++Token.TextLen, Token.Type = Lex::SECTION;
    // this should NOT vary by locale!
    else if(Char == '{')
        {
        Token.Type      = Lex::BADCODE;     // assume it won't end well
        ++Token.TextLen, Char = *++Rover;
        // scan until the '%' of "%}" or end of input
        while(Char && !(Char == '%' && Rover[1] == '}'))
            ++Token.TextLen, Char = *++Rover;
        if(Char == '%')
            {
            ++Token.TextLen;
            if(Rover[1] == '}')               // if well-terminated after all
                ++Token.TextLen, Token.Type = Lex::CODE;
            }
        }
    else if(IsAlpha(Char))
        {
        Token.Type  = Lex::UNKDIR;
        // Char is *Rover (not yet counted), so each letter bumps TextLen
        // exactly once; TextLen ends up covering '%' plus the word.
        while(IsAlpha(Char))
            ++Token.TextLen, Char    = *++Rover;
        if(Token.TextLen == 6 && !strncmp(Token.Text, "%token", 6))
            Token.Type  = Lex::TOKEN;
        else if(Token.TextLen == 5 && !strncmp(Token.Text, "%left", 5))
            Token.Type  = Lex::LEFT;
        else if(Token.TextLen == 6 && !strncmp(Token.Text, "%right", 6))
            Token.Type  = Lex::RIGHT;
        else if(Token.TextLen == 9 && !strncmp(Token.Text, "%nonassoc", 9))
            Token.Type  = Lex::NONASSOC;
        // precedence directives introduce operator names: switch context
        if(Token.Type == Lex::LEFT || Token.Type == Lex::RIGHT || Token.Type == Lex::NONASSOC)
            ContextMatch    = &TLexer::OperatorMatch;
        }
    // else, it's Lex::ILLEGAL

    return Token;
    }

/* PrimaryMatch - match one token in normal grammar context.
 *
 * On entry Rover points at the first unconsumed character; the returned
 * token always covers at least that one character, and its TextLen tells
 * the caller how far to advance.
 */
TToken TLexer::PrimaryMatch(const char* Rover)
    {
    TToken          Token;
    unsigned char   Char;

    Token.Text      = Rover;
    Char            = *Rover++;
    Token.TextLen   = 1;            // we already ate one character
    if(Char == ' ' || Char == '\t' || Char == '\f')
        Token   = MatchWhite(Rover);
    else if(Char == '\n')
        Token.Type  = Lex::NEWLINE;
    else if(Char == '|')
        Token.Type  = Lex::ORBAR;
    else if(Char == ':')
        Token.Type  = Lex::COLON;
    else if(Char == ';')
        Token.Type  = Lex::SEMICOLON;
    else if(Char == '%')
        Token  = MatchDirective(Rover);
    else if(Char == '/' && (*Rover == '/' || *Rover == '*'))
        Token   = MatchComment(Rover);
    else if(Char == '\'' || Char == '"')
        Token   = MatchQuote(Rover, Char);
    else if(IsAlpha(Char))
        {
        Token.Type  = Lex::IDENT;
        // The first character is already counted in TextLen, so only
        // advance over the characters that FOLLOW it.  (The old loop
        // re-tested Char and over-counted every identifier by one,
        // making Tokenize() skip the character after each identifier.)
        while(IsAlpha(*Rover))
            ++Token.TextLen, ++Rover;
        }
    else
        Token.Type  = Lex::ILLEGAL;
    // (a second, identical quote test used to follow here; it was
    // unreachable and has been removed)
    return Token;
    }

/* OperatorMatch - token matcher used after %left/%right/%nonassoc.
 *
 * Everything up to the next whitespace is one Lex::OPERATOR token.  A
 * newline returns the lexer to the normal PrimaryMatch context.
 */
TToken TLexer::OperatorMatch(const char* Rover)
    {
    TToken          Token;
    unsigned char   Char;

    Token.Text      = Rover;
    Char            = *Rover++;
    Token.TextLen   = 1;            // we already ate one character
    if(Char == ' ' || Char == '\t' || Char == '\f')
        Token   = MatchWhite(Rover);
    else if(Char == '\n')
        {
        Token.Type      = Lex::NEWLINE;
        ContextMatch    = &TLexer::PrimaryMatch;
        }
    else
        {
        Token.Type  = Lex::OPERATOR;
        // Extend the token up to (not including) the next whitespace.
        // strchr() also matches the set's terminating NUL, so end of
        // input stops the loop too.  The first character is already
        // counted; if it is itself in the set (a bare '\r') the token
        // stays one character long.  (The old loop advanced Rover but
        // never bumped TextLen, truncating every operator to 1 char.)
        if(!strchr(" \t\f\r\n", Char))
            while(!strchr(" \t\f\r\n", *Rover))
                ++Token.TextLen, ++Rover;
        }
    return Token;
    }

int LexFile::Tokenize()
    {
    const char* Rover           = &(*FileText)[0]; // get raw pointer
    TLexer      Lexer;
    TToken      Token;
    TToken      FirstError      = TToken::Null;
    int         SectionCount    = 0;

    TokenStartId        = 
    while(*Rover)
        {
        Token   = Lexer.MatchNext(Rover);
        switch(Token.Type)
            {
            case Lex::ERROR     :
            case Lex::ILLEGAL   :
            case Lex::TOOLONG   :
            case Lex::UNKDIR    :
            case Lex::BADCODE   :
            case Lex::BADQUOTE  : ++ErrorCount;
                fprintf(stdout, "error token='%.*s'\n", Token.TextLen, Token.Text);
                break;
            case Lex::SECTION   : ++SectionCount;
                break;
            }
        Tokens.push_back(Token);
        assert(Token.TextLen > 0);
        Rover += Token.TextLen;
        if(SectionCount >= 2)       // if rover points at user's program text
            {
            Token.Type      = Lex::PROGRAM;
            Token.Text      = Rover;
            Token.TextLen   = 0;    // allow it to be bigger than unsigned short
            Tokens.push_back(Token);
            break;
            }
        }
    Token       = TToken::Null;
    Token.Type  = Lex::TKEOF;
    Tokens.push_back(Token);

    return ErrorCount;
    }


/* Lex::FileLoad() - reads contents of Filename to create new LexFile.
 *
 * Lex is the owner of all such LexFile objects; a reference to the
 * newly loaded one is returned.  Loading the same file twice is a
 * caller error (asserted).
 */
unique_ref<LexFile> Lex::FileLoad(const char* Filename, TToken FromInclude, Lex* Lex_)
    {
    assert(!IsLoaded(Filename));
    TFileChars FileText(::FileLoad(Filename));

    // NOTE(review): LexFile's constructor takes three arguments; the old
    // call here passed Lex_ as a fourth and could not compile.  Lex_ is
    // kept in the signature for existing callers but is currently unused.
    (void)Lex_;
    Files.push_back(unique_ptr<LexFile>(new LexFile(Filename, std::move(FileText), FromInclude)));

    // return pointer to loaded file
    return Files.back();
    }

bool  Lex::IsLoaded(const char* Filename)
    {
    bool    Result = false;

    for(auto const & f : Files)
        if(f->Filename == Filename)
            {
            Result  = true;
            break;
            }
    return Result;
    }

// nothing to set up: members default-construct (Files starts empty)
Lex::Lex()
    {
    }

// defaults do all the work (Files' unique_ptrs free every LexFile)
Lex::~Lex()
    {
    }


/* FileLoad() - load the contents of a file into a NUL-terminated string.
 *
 * There is some funky stuff about standard library file seeking designed
 * to permit files larger than you have an integral type for.  However,
 * our speed drops more than linearly with grammar size, so it's not a
 * bad idea to object to being handed a too-large file (e.g., they passed
 * us the name of an executable by mistake or something). So, we kill two
 * birds with one stone by defining a maximum file size that still fits
 * in 32 bits.
 */
/* Resize() - grow (or shrink) Buffer to NewSize.
 *
 * Dies with a friendly message (via ErrorExit) instead of letting an
 * unhandled bad_alloc escape if the allocation fails; Filename is only
 * used for that message.
 */
static void Resize(const char* Filename, std::vector<char>& Buffer, size_t NewSize)
    {
    try {
        Buffer.resize(NewSize);
        }
    catch(const std::bad_alloc&)        // catch by const reference, not by value
        {
        ErrorExit(ERROR_INPUT_FILE_TOO_BIG,
            "Unable to allocate %luKB for reading file '%s' (is it huge?)\n",
            (unsigned long)(NewSize / 1024), Filename);
        }
    }

/* FileLoad() - load the whole of Filename into a NUL-terminated vector.
 *
 * The first fread() attempts FIRST_READ bytes so that typical grammars
 * are read in a single call (the old code allocated FIRST_READ up front
 * but still read only IO_SIZE at a time); after that we grow IO_SIZE at
 * a time.  Files larger than MAX_FILE_SIZE are rejected outright, as the
 * commentary above promises (the old code declared the limit but never
 * enforced it).  All failures exit via ErrorExit().
 */
unique_ptr<std::vector<char>>   FileLoad(const char* Filename)
    {
    unique_ptr<std::vector<char>> Result(new std::vector<char>);
    std::vector<char>&  Buffer = *Result.get();
    size_t  LogicalSize     = 0;
    size_t  PhysicalSize    = 0;
    size_t  BytesRead       = 0;

    assert(Filename != NULL);
    assert((IO_SIZE % 512) == 0);   // nobody does weird sector sizes, right?

    // use exception-safe pointer to hold file handle
    unique_ptr<FILE, int(*)(FILE*)> FilePtr(nullptr, fclose);

    // NOTE(review): "r" (text mode) is kept as-is; on POSIX it is
    // identical to "rb", on Windows it translates CRLF.
    FilePtr.reset(fopen(Filename, "r"));
    if(FilePtr == nullptr)
        ErrorExit(ERROR_CANT_OPEN_INPUT_FILE,
              "Unable to open '%s' for reading.\n"
              "%d: %s\n", Filename, errno, strerror(errno));
    for(;;)
        {
        // first pass: one big read attempt; later passes: IO_SIZE chunks
        size_t ChunkSize = PhysicalSize ? IO_SIZE : FIRST_READ;
        PhysicalSize    += ChunkSize;
        /* +1 is to allow for adding a trailing NUL byte */
        Resize(Filename, Buffer, PhysicalSize+1);

        BytesRead   = fread((void*)(&Buffer[0]+LogicalSize), sizeof(char), ChunkSize, FilePtr.get());
        LogicalSize+= BytesRead;
        if(LogicalSize > (size_t)MAX_FILE_SIZE)     // enforce the documented size cap
            ErrorExit(ERROR_INPUT_FILE_TOO_BIG,
                "Refusing to read more than %ldMB from '%s' (is it really a grammar?)\n",
                MAX_FILE_SIZE / (1024*1024), Filename);
        if(BytesRead != ChunkSize)
            {
            /* check for errors first */
            if(ferror(FilePtr.get()))
                ErrorExit(ERROR_UNKNOWN_FREAD_ERROR,
                    "Unknown fread() error while reading '%s'\n", Filename);
            /* if we hit the end, then we're done */
            else if(feof(FilePtr.get()))
                {
                Buffer[LogicalSize]     = '\0';
                break;                          // the only normal exit from the loop!
                }
            else
                ErrorExit(ERROR_FREAD_FELL_SHORT,
                "Can't happen: fread() fell short with no errors before EOF.\n");
            }
        }

    // shrink it back down to minimum needed size.
    Resize(Filename, Buffer, LogicalSize+1);

    return Result;
    }
