/* lex.cpp - implement lexing.
 *
 * Lex acts like a global singleton that retains in-memory copies
 * of every grammar file it has loaded, so asking for the same file
 * twice returns the already-tokenized LexFile.
 ****/

#include "lex.h"

// C and C++ standard headers used directly by this translation unit
#include <cassert>
#include <cctype>
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <memory>
#include <vector>

// keep constants in private namespace
// (file-local tuning knobs for the stdio-based FileLoad() at the bottom)
namespace
    {
    const size_t    IO_SIZE         = (1024*16);        // grow by this much, as needed
    const size_t    FIRST_READ      = (IO_SIZE * 16);   // size of our (big!) first read attempt
    const long      MAX_FILE_SIZE   = (1024*1024*8);    // I doubt we could handle 8MB worth of grammar!
    }

/* Unquote() - strip the surrounding quote characters from a QUOTED token.
 *
 * Returns a heap-allocated, NUL-terminated copy of the token text with
 * the first and last characters (the quotes) removed.
 *
 * NOTE(review): the buffer comes from new[] but unique_ptr<char>'s default
 * deleter uses scalar delete; the declared return type in lex.h should
 * migrate to unique_ptr<char[]> — confirm with the header's owner.
 */
unique_ptr<char> TToken::Unquote() const
    {
    assert(Type == Lex::QUOTED);
    assert(TextLen >= 2);           // must hold at least the two quote chars

    // TextLen-2 content characters plus one byte for the terminator
    unique_ptr<char> Result(new char[TextLen-1]);
    memcpy(Result.get(), Text+1, TextLen-2);
    // BUGFIX: the original strncpy() copied exactly TextLen-2 bytes and
    // never wrote a NUL, leaving the final byte uninitialized.
    Result.get()[TextLen-2] = '\0';
    return Result;
    }

// sentinel "no token" value: all fields zero/null
TToken TToken::Null = { nullptr, 0, 0 };

// IsNull() - non-zero when this token has the all-zero sentinel state
// of TToken::Null above.
int     TToken::IsNull()
    {
    return Text == nullptr && TextLen == 0 && Type == 0;
    }


/* LexFile constructor - record where the text came from and take
 * ownership of it.  FromInclude_ is the %include token that caused the
 * load (TToken::Null for the top-level file, presumably — confirm).
 */
LexFile::LexFile(const char* Path, TFileChars Text, TToken FromInclude_)
    :   Filename(Path),
        FileText(std::move(Text)),      // take ownership of the buffer
        ErrorCount(0),                  // no errors until Tokenize() runs
        FromInclude(FromInclude_)
    {
    // all members are fully set up in the initializer list
    }

// defaults do all the work: every member cleans itself up
LexFile::~LexFile() = default;

// TRover: a mutable reference to a pointer-to-const-char; lets the lexing
// helpers advance the caller's cursor through the (read-only) input text.
// Modernized from typedef to an alias declaration.
using TRover = char const * &;

/* GetComment() - we hit a '/' followed by a '/' or '*'.
 *
 * On entry Rover points at the second character of the comment opener.
 * On exit Rover points at the newline (or NUL) that ends a single-line
 * comment, or just past the two-character terminator of a multi-line
 * comment.  Multi-line comments that run into EOF are a fatal error.
 */
void GetComment(TRover Rover, TToken& Token)
    {
    Token.Type  = Lex::COMMENT;
    if(*Rover++ == '/') // if single-line comment
        {
        // stop AT the newline so it is lexed as its own NEWLINE token.
        // BUGFIX: EOF also ends a single-line comment — the original
        // reported ERROR_EOF_IN_COMMENT for a final "//" line that had
        // no trailing newline.
        while(*Rover && *Rover != '\n')
            ++Rover;
        }
    else            // else it's a multi-line style comment
        {
        for(; *Rover; ++Rover)
            if(Rover[0] == '*' && Rover[1] == '/')
                {
                // BUGFIX: step past the closing "*/"; the original left
                // Rover parked on the '*', so the terminator came back
                // from the tokenizer as two ILLEGAL tokens.
                Rover   += 2;
                return;
                }
        // only a multi-line comment can legitimately hit end of input
        // TODO: use line number when context stuff is working!
        ErrorExit(ERROR_EOF_IN_COMMENT, "Unexpected EOF in comment.\n");
        }
    }

/* GetDirective() - classify a '%' directive.
 *
 * On entry the '%' has been consumed and Token.Text points at it.
 * Returns 1 for the precedence directives (%left/%right/%nonassoc),
 * which switch the tokenizer into state 1; otherwise 0.
 */
int GetDirective(TRover Rover, TToken& Token)
    {
    int State   = 0;

    // BUGFIX: the original "while(isalpha(*Rover++));" post-incremented
    // Rover even on the failing test, consuming one extra character (and
    // stepping past the NUL when a directive ended the file).  That made
    // every Len below one too large, so the exact-length comparisons
    // never matched and all directives came back as UNKDIR.
    while(isalpha((unsigned char)*Rover))
        ++Rover;

    // BUGFIX: "<=" rather than "<" — a directive of exactly MAXLEN left
    // Token.Type as NOTUSED, which the caller then asserted on.
    if(Rover - Token.Text <= TToken::MAXLEN)
        {
        int Len = int(Rover - Token.Text);

        if(Len == 5 && !strncmp("%left", Token.Text, 5))
            Token.Type  = Lex::LEFT, State = 1;
        else if(Len == 6 && !strncmp("%right", Token.Text, 6))
            Token.Type  = Lex::RIGHT, State = 1;
        else if(Len == 9 && !strncmp("%nonassoc", Token.Text, 9))
            Token.Type  = Lex::NONASSOC, State = 1;
        else if(Len == 6 && !strncmp("%token", Token.Text, 6))
            Token.Type  = Lex::TOKEN;
        else if(Len == 8 && !strncmp("%operand", Token.Text, 8))
            Token.Type  = Lex::OPERAND;
        else if(Len == 5 && !strncmp("%test", Token.Text, 5))
            Token.Type  = Lex::TEST;
        else if(Len == 6 && !strncmp("%start", Token.Text, 6))
            Token.Type  = Lex::START;
        else
            Token.Type  = Lex::UNKDIR;
        }

    return State;   // 1 => enter the precedence-line state
    }
/* GetWhite() - consume a run of horizontal whitespace.
 *
 * Newline is deliberately NOT in the set: it is lexed as its own token.
 */
void GetWhite(TRover Rover, TToken& Token)
    {
    Token.Type  = Lex::WHITESPACE;

    // BUGFIX: guard against NUL explicitly — strchr() considers the search
    // set's terminating '\0' a member, so the original marched Rover past
    // the end of the buffer whenever whitespace ran right up to EOF.
    while(*Rover && strchr(" \t\r\v\f", *Rover))
        ++Rover;
    }

/* GetIdent() - consume the tail of an identifier.
 *
 * The caller already accepted the first character; keep advancing while
 * we see letters, digits, or underscores.
 */
int GetIdent(char const* &Rover, TToken& Token)
    {
    for(; isalnum((unsigned char)*Rover) || *Rover == '_'; ++Rover)
        ;
    Token.Type  = Lex::IDENT;

    return 0;   // identifiers never change the tokenizer state
    }
// Modernized from typedef to an alias declaration.
// NOTE(review): charp appears unused in this translation unit — candidate
// for removal if nothing else depends on it; confirm before deleting.
using charp = char*;

/* GetNextToken1() - handling operator precedence
 *
 * State-1 twin of GetNextToken0(), entered after a %left/%right/%nonassoc
 * directive.  The dispatch logic is identical; the only difference is the
 * default State value: tokens that do not set State themselves (whitespace,
 * comments, the punctuation cases) leave the machine in state 1 here
 * instead of state 0.
 *
 * Classifies the text at Rover into Token, advances Rover past it, and
 * returns the next machine state (-1 at EOF, which stops Tokenize()).
 *
 * NOTE(review): near-duplicate of GetNextToken0() — candidate for merging
 * into one parameterized function.
 */
int GetNextToken1(char const * &Rover, TToken& Token)
    {
    char    Char;
    int     State   = 1;    // default: remain in the precedence-line state

    Token.Text      = Rover;
    Token.Type      = Lex::NOTUSED;     // every path below must overwrite this
    if((Char=*Rover++) == '\0')
        {
        Token.Type  = Lex::TKEOF;
        --Rover;            // stay parked on the terminating NUL
        State   = -1;       // signal end-of-input to the caller
        }
    else if(isalpha((unsigned char)Char) || Char == '_')
        State   = GetIdent(Rover, Token);   // GetIdent returns 0: back to normal state
    else if(strchr(" \t\r\v\f", Char))      // Char != '\0' here, so strchr is safe
        GetWhite(Rover, Token);
    else if(Char == '/' && (*Rover == '/' || *Rover == '*'))
        GetComment(Rover, Token);
    else if(Char == '%')
        State   = GetDirective(Rover, Token);
    else
        {
        switch(Char)
            {
            case '\n'   : Token.Type    = Lex::NEWLINE;     break;
            case '|'    : Token.Type    = Lex::ORBAR;       break;
            case ';'    : Token.Type    = Lex::SEMICOLON;   break;
            default     :
                Token.Type  = Lex::ILLEGAL;
            }
        }

    // clamp over-long tokens to MAXLEN and flag them as an error token
    if(Rover - Token.Text > TToken::MAXLEN)
        {
        Token.Type      = Lex::TOOLONG;
        Token.TextLen   = TToken::MAXLEN;
        }
    else
        Token.TextLen   = (short)(Rover - Token.Text);

    // every branch above must have classified the token
    if(Token.Type == Lex::NOTUSED)
        fprintf(stderr, "rover=%10.10s\n", Token.Text);
    assert(Token.Type != Lex::NOTUSED);

    return State;
    }
/* GetNextToken0() - normal state of tokenizing.
 *
 * Classifies the text at Rover into Token, advances Rover past it, and
 * returns the next machine state: 0 to stay here, whatever the directive
 * handler says for '%' lines, or -1 at end of input.
 */
int GetNextToken0(TRover Rover, TToken& Token)
    {
    int State   = 0;                    // assume we stay in the normal state

    Token.Text  = Rover;
    Token.Type  = Lex::NOTUSED;         // every path below must overwrite this

    char Char   = *Rover++;
    if(Char == '\0')
        {
        --Rover;                        // keep Rover parked on the NUL
        Token.Type  = Lex::TKEOF;
        State       = -1;               // tells Tokenize() to stop
        }
    else if(isalpha((unsigned char)Char) || Char == '_')
        State   = GetIdent(Rover, Token);
    else if(strchr(" \t\r\v\f", Char))  // Char != '\0' here, so strchr is safe
        GetWhite(Rover, Token);
    else if(Char == '/' && (*Rover == '/' || *Rover == '*'))
        GetComment(Rover, Token);
    else if(Char == '%')
        State   = GetDirective(Rover, Token);
    else if(Char == '\n')
        Token.Type  = Lex::NEWLINE;
    else if(Char == '|')
        Token.Type  = Lex::ORBAR;
    else if(Char == ';')
        Token.Type  = Lex::SEMICOLON;
    else
        Token.Type  = Lex::ILLEGAL;

    // clamp over-long tokens to MAXLEN and flag them as an error token
    if(Rover - Token.Text > TToken::MAXLEN)
        {
        Token.Type      = Lex::TOOLONG;
        Token.TextLen   = TToken::MAXLEN;
        }
    else
        Token.TextLen   = (short)(Rover - Token.Text);

    // every branch above must have classified the token
    if(Token.Type == Lex::NOTUSED)
        fprintf(stderr, "rover=%10.10s\n", Token.Text);
    assert(Token.Type != Lex::NOTUSED);

    return State;
    }

/* Tokenize() - break input file into tokens.
 *
 * Returns the number of error tokens encountered. Note that we will
 * tokenize successfully, no matter what. Anything bad is just viewed
 * as an error token, which is still a token!
 */
typedef int (*STATEFUNC)(TRover Rover, TToken& Token);
int LexFile::Tokenize()
    {
    char*       Rover           = &(*FileText)[0]; // get raw pointer
    int         SectionCount    = 0;
    TToken      Token;
    int         State           = 0;    // start state
    STATEFUNC   Machine[]       = { GetNextToken0, GetNextToken1 };

    assert(Tokens.size() == 0);      // don't call us more than once!
    assert(Rover != nullptr);
    ErrorCount  = 0;

    Tokens.reserve(64);
while((State=Machine[State]((TRover)Rover, Token)) >= 0)
        {
        Tokens.push_back(Token);
        switch(Token.Type)
            {
            case Lex::ILLEGAL   :
            case Lex::TOOLONG   :
            case Lex::BADQUOTE  : ++ErrorCount;
                break;
            case Lex::SECTION   : ++SectionCount;
                break;
            }
        }

    return ErrorCount;
    }

/* Lex::FileLoad() - reads contents of Filename to create new LexFile.
 *
 * Lex is the owner of all such LexFile objects. If a particular file
 * was already loaded in the past, we just return a reference to that
 * LexFile.
 */
TTokenizedFile  Lex::FileLoad(const char* Filename, TToken FromInclude)
    {
    // caller must check for already loaded file
    assert(Files.count(Filename) == 0);

    TFileChars FileText(::FileLoad(Filename));

    // store it in our map until shutdown time.  Grab the slot once and
    // fill it — the original looked the key up twice (insert, then read).
    auto& Slot = Files[Filename];
    Slot.reset(new LexFile(Filename, std::move(FileText), FromInclude));

    // return pointer to loaded file
    return Slot.get();
    }

/* Loaded() - look up a previously loaded file.
 *
 * Returns the LexFile for Filename, or nullptr if it was never loaded.
 */
TTokenizedFile  Lex::Loaded(const char* Filename)
    {
    auto Found = Files.find(Filename);

    return (Found == Files.end()) ? nullptr : Found->second.get();
    }

// nothing to set up beyond member defaults
Lex::Lex() = default;

// the Files map tears down every owned LexFile for us
Lex::~Lex() = default;

// NOTE(review): disabled earlier implementation of FileLoad(), kept for
// reference only — the stdio-based version at the bottom of this file is
// the one that is compiled.  Candidate for deletion.
#if 0
char* FileLoad(const char *filename)
    {
    // open in binary, don't want line ending transformations
    std::ifstream in(filename, std::ios::in | std::ios::binary);
    if (in)
        {
        in.seekg(0, std::ios::end);
        auto    Size = in.tellg();
        char*   contents = new char[(size_t)Size+1];
        in.seekg(0, std::ios::beg);
        in.read(&contents[0], Size);
        in.close();
        return(contents);
        }
    throw(errno);
    }
#endif

/* FileLoad() - load the contents of a file into a NUL-terminated string.
 *
 * There is some funky stuff about standard library file seeking designed
 * to permit files larger than you have an integral type for.  However,
 * our speed drops more than linearly with grammar size, so it's not a
 * bad idea to object to being handed a too-large file (e.g., they passed
 * us the name of an executable by mistake or something). So, we kill two
 * birds with one stone by defining a maximum file size that still fits
 * in 32 bits.
 */
/* Resize() - grow (or shrink) Buffer to NewSize, treating allocation
 * failure as a fatal "file too big" error rather than letting the
 * exception propagate.
 */
static void Resize(const char* Filename, std::vector<char>& Buffer, size_t NewSize)
    {
    try {
        Buffer.resize(NewSize);
        }
    catch(const std::bad_alloc&)    // BUGFIX: catch by const reference, not by value
        {
        ErrorExit(ERROR_INPUT_FILE_TOO_BIG,
            "Unable to allocate %luKB for reading file '%s' (is it huge?)\n",
            (unsigned long)(NewSize / 1024), Filename);
        }
    }

/* FileLoad() - read Filename into a NUL-terminated, heap-owned buffer.
 *
 * Grows the buffer incrementally (FIRST_READ, then +IO_SIZE per pass)
 * until EOF, enforces MAX_FILE_SIZE, and shrinks to fit on the way out.
 * All failures are fatal via ErrorExit().
 */
unique_ptr<std::vector<char>>   FileLoad(const char* Filename)
    {
    unique_ptr<std::vector<char>> Result(new std::vector<char>);
    std::vector<char>&  Buffer = *Result.get();
    size_t  LogicalSize     = 0;    // bytes actually read so far
    size_t  PhysicalSize    = 0;    // buffer space requested so far
    size_t  BytesRead       = 0;

    assert(Filename != NULL);
    assert((IO_SIZE % 512) == 0);   // nobody does weird sector sizes, right?

    // use exception-safe pointer to hold file handle
    unique_ptr<FILE, int(*)(FILE*)> FilePtr(nullptr, fclose);

    // BUGFIX: open in binary ("rb").  In text mode on line-translating
    // platforms, fread() can return fewer bytes than requested before EOF,
    // which would trip the "can't happen" branch below.
    FilePtr.reset(fopen(Filename, "rb"));
    if(FilePtr == nullptr)
        ErrorExit(ERROR_CANT_OPEN_INPUT_FILE,
              "Unable to open '%s' for reading.\n"
              "%d: %s\n", Filename, errno, strerror(errno));
    for(;;)
        {
        PhysicalSize    = PhysicalSize ? (PhysicalSize+IO_SIZE) : FIRST_READ;

        // BUGFIX: enforce the documented size cap — MAX_FILE_SIZE was
        // defined but never actually checked.
        if(LogicalSize > (size_t)MAX_FILE_SIZE)
            ErrorExit(ERROR_INPUT_FILE_TOO_BIG,
                "Input file '%s' is larger than the %ldKB maximum.\n",
                Filename, MAX_FILE_SIZE / 1024);

        /* +1 is to allow for adding a trailing NUL byte */
        Resize(Filename, Buffer, PhysicalSize+1);

        // BUGFIX: fill all the space just allocated; the original always
        // asked for only IO_SIZE bytes, wasting most of the (big!) first
        // allocation and doing one fread per 16KB forever after.
        size_t Wanted   = PhysicalSize - LogicalSize;
        BytesRead   = fread((void*)(&Buffer[0]+LogicalSize), sizeof(char), Wanted, FilePtr.get());
        LogicalSize+= BytesRead;
        if(BytesRead != Wanted)
            {
            /* check for errors first */
            if(ferror(FilePtr.get()))
                ErrorExit(ERROR_UNKNOWN_FREAD_ERROR,
                "Unknown fread() error while reading '%s'\n", Filename);
            /* if we hit the end, then we're done */
            else if(feof(FilePtr.get()))
                {
                Buffer[LogicalSize]     = '\0';
                break;                          // the only normal exit from the loop!
                }
            else
                ErrorExit(ERROR_FREAD_FELL_SHORT,
                "Can't happen: fread() fell short with no errors before EOF.\n");
            }
        }

    // shrink it back down to minimum needed size (text + NUL).
    Resize(Filename, Buffer, LogicalSize+1);

    return Result;
    }
