/*-------------------------------------------------------------------------
 - Copyright (c) 2024-2025 [XD-AMCC TEAM]
 - [XD-AMCC] is licensed under Mulan PSL v2.
 - You can use this software according to the terms and conditions of the Mulan PSL v2.
 - You may obtain a copy of Mulan PSL v2 at:
 -          http://license.coscl.org.cn/MulanPSL2
 - THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
 - OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
 - TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 - See the Mulan PSL v2 for more details.
 ------------------------------------------------------------------------*/


/**
 * @brief Inner operations of the class lexer(scanner).
 *
 * @author  WXQ#XDU
 * @date    2024.11
 */

#include <assert.h>
#include <stdio.h>

#include "_lex_inner.h"

// -----------------------------------------------------------------------
// Part I:  inner API ONLY used by ../lexer.c
//

//  Set the filepath of the input to be parsed.
void lexer_setInputFile (t_lexer_ptr pThis, const char* inputFilePath) {
    assert(NULL != pThis);

    // Drop every leftover from a previous input, tokens included.
    lexer_release_resources(pThis, true);
    pThis->inputFilePath = inputFilePath;

    // Prepare a fresh token stream for the new input file.
    t_tokenstream * pNewStream = tokenstream_new();
    if (NULL == pNewStream) {
        ERROR_REPORT_MALLOC();
        return;     // tokenStream stays NULL (cleared by release above)
    }
    pThis->tokenStream = pNewStream;
}


//  Open the input file and validate that it is readable and non-empty.
//
//  Return values:
//    1 the input file is opened successfully,
//   -1 failed to open/read the input file, or the file is empty.
//      (On failure no file handle is kept open.)
//
int lexer_open_file (t_lexer_ptr pThis) {
    const char* filePath = pThis->inputFilePath;
    log_trace("Try to open input-file [%s]", filePath);
    FILE * fp = fopen(filePath, "rb");
    if (NULL == fp) {
        ERROR_REPORT_FILEOP(EC_OPENFILE, filePath );
        pThis->currentPosition.filePath = filePath;
        return -1;
    }

    // Determine the file size by seeking to the end.
    fseek(fp, 0, SEEK_END);
    long fileLen = ftell(fp);
    log_debug("fileSize is [%ld] BYTE%s, filePath is [%s]", fileLen,
              (fileLen > 1 ? "s" : ""), filePath);
    if (fileLen < 0) {
        ERROR_REPORT_FILEOP(EC_READFILE, filePath);
        fclose(fp);     // FIX: don't leak the handle on the error path
        return -1;
    } else  if (fileLen == 0) {
        ERROR_REPORT_FILEOP(EC_EMPTY_FILE, filePath);
        fclose(fp);     // FIX: don't leak the handle on the error path
        return -1;
    }

    pThis->currentHandler           = fp;
    pThis->currentPosition.line     = 0;
    pThis->currentPosition.column   = 0;
    pThis->currentPosition.filePath = filePath;
    return 1;
}


//  Load content of current input file into a big buffer.
//
//  NOTICE: We assume that the file is not too large, and therefore it will
//          not take up too much memory.
//
//  Return values:
//    >=0  successful to load file
//     -1  failed to read file
//
int  lexer_load_file (t_lexer_ptr pThis) {
    assert(NULL != pThis && NULL != pThis->currentHandler);

    // Wrap the open file handle in a char-stream which buffers
    // the whole file content in memory.
    t_charstream_ptr pNewStream =
        charstream_new(pThis->currentHandler, pThis->inputFilePath);
    if (NULL == pNewStream)
        return -1;          // allocation or read failure
    pThis->charStream = pNewStream;

    log_debug("Loaded file content into memory buffer");
    return 0;
}

// Close the current input file, if one is open, and forget its handle.
//
void lexer_close_file (t_lexer_ptr pThis) {
    FILE * fp = pThis->currentHandler;
    if (NULL == fp)
        return;                     // nothing is open
    fclose(fp);
    pThis->currentHandler = NULL;   // avoid a dangling handle
}


// Release all inner resources used by the lexer and close the input file.
// If bReleaseTokens is true, the token stream is destroyed as well.
//
void  lexer_release_resources (t_lexer_ptr pThis, bool bReleaseTokens) {
    // Reset the scan position. The input-file-path itself is NOT freed:
    // it points into a command-line argument owned by the caller.
    if (NULL != pThis->inputFilePath) {
        pThis->currentPosition.column   = 0;
        pThis->currentPosition.line     = 0;
        pThis->currentPosition.filePath = NULL;
    }

    // Drop the in-memory file buffer.
    if (NULL != pThis->charStream) {
        charstream_destroy(pThis->charStream);
        pThis->charStream = NULL;
    }

    // Optionally drop the produced tokens as well.
    if (bReleaseTokens && NULL != pThis->tokenStream) {
        tokenstream_destroy(pThis->tokenStream);
        pThis->tokenStream = NULL;
    }

    lexer_close_file(pThis);
}

// Forward declarations for local helpers (defined later in this file).

static int extract_word_(t_lexer_ptr pThis);
static bool skipSpaceChars_(t_lexer_ptr pThis);
static int postProcess_(t_lexer_ptr pThis);
static int recognize_nextToken_(t_lexer_ptr pThis);
static t_word_info recognize_token_core_(t_word_info wordInfo);
static bool eatNewLine_(t_lexer_ptr pThis, char ch1);
static void discardBlockComment_(t_lexer_ptr pThis);
static void discardLineComment_(t_lexer_ptr pThis);
static void discardErrorChars_(t_lexer_ptr pThis);

// Advance the current position by one column.
#define POS_COL_INC(pLexer)  ((pLexer)->currentPosition.column += 1)

// Advance the current position to the start of the next line.
// FIX: wrapped in do/while(0) — the old two-statement form would
// silently break inside an un-braced if/else; arguments parenthesized.
#define POS_LINE_INC(pLexer)                        \
    do {                                            \
        (pLexer)->currentPosition.line += 1;        \
        (pLexer)->currentPosition.column = 1;       \
    } while (0)


// Scan and recognize all tokens in the current input file.
//
// Return values:
//      0  all is successful, or there were errors that are ignored.
//     -2  fail on memory allocation
//
int lexer_scan_file (t_lexer_ptr pThis) {
    // Token positions are 1-based.
    pThis->currentPosition.line   = 1;
    pThis->currentPosition.column = 1;

    for (;;) {
        int result = recognize_nextToken_(pThis);
        if (result < 0)
            return -2;              // un-recoverable error

        if (TK_EOF == pThis->lastTokenKind)
            break;                  // whole input consumed

        // Pseudo-tokens never enter the stream; discard their bodies.
        if (TK_COMMENT_BLOCK == pThis->lastTokenKind) {
            discardBlockComment_(pThis);    // after opening tag '/*'
        } else if (TK_COMMENT_LINE == pThis->lastTokenKind) {
            discardLineComment_(pThis);     // after opening tag '//'
        } else if (TK_ERROR == pThis->lastTokenKind) {
            discardErrorChars_(pThis);      // invalid/unexpected char
        }
    }

    return 0;
}



// -----------------------------------------------------------------------
// Part II:  local, helper operations ONLY be used in this file
//


// Recognizes the next token.
//
// Return values:
//  <0 caller must stop work (fatal error),
//   0 caller must do more work,
//  +1 a new token has been appended into the token-stream
//
static int recognize_nextToken_ (t_lexer_ptr pThis) {
    // Step 1 (pre-process): skip white space in front of the token.
    // When only EOF remains, report TK_EOF and stop here.
    if ( skipSpaceChars_ (pThis) ) {
        pThis->lastTokenKind = TK_EOF;
        return 0;
    }

    // Step 2 (main-process): scan the char sequence of one word.
    int gotWord = extract_word_ (pThis);
    if (0 == gotWord)
        return 0;       // pseudo-token: the caller discards its body

    // Step 3 (post-process): build the token and append it to the stream.
    return postProcess_ (pThis);
}


// Pre-Process: skips white spaces in front of a token.
// Return values:
//   TRUE  ONLY when EOF is encountered.
//   FALSE there are more words to be analyzed.
//
static bool skipSpaceChars_ (t_lexer_ptr pThis) {
    for (;;) {
        char c = READ_CHAR (pThis);

        if ( IS_SPACE (c) ) {
            POS_COL_INC (pThis);        // plain blank: advance column
        } else if ( IS_EOL (c) ) {
            eatNewLine_ (pThis, c);     // line break: advance line
        } else if ( IS_EOF (c) ) {
            return true;                // nothing left to scan
        } else {
            BACK_CHAR (pThis);          // non-space: give it back
            return false;
        }
    }
}


// Main-Process: extracts a word that may be a token or not.
// Return values:
//   0  caller must do more work (comment / error pseudo-token),
//  +1  got a token
//
static int extract_word_ (t_lexer_ptr pThis) {
    charstream_markBegin(pThis->charStream);

    // Run the core recognizer over the upcoming characters.
    t_word_info wordInfo;
    wordInfo.theLexer      = pThis;
    wordInfo.n_scanedchars = 0;
    wordInfo.kind          = TK_ERROR;
    wordInfo = recognize_token_core_(wordInfo);

    pThis->lastTokenKind = wordInfo.kind;
    int txtLen = wordInfo.n_scanedchars;

    // Pseudo-tokens never reach the token stream: just advance the
    // column past the scanned text and let the caller discard the rest.
    if (   TK_COMMENT_BLOCK == pThis->lastTokenKind
        || TK_COMMENT_LINE  == pThis->lastTokenKind
        || TK_ERROR         == pThis->lastTokenKind ) {
        pThis->currentPosition.column += txtLen;
        return 0;
    }

    // A valid token: remember where its text ends.
    charstream_markEnd(pThis->charStream);
    return 1;
}


// Post process: generates the product of the lexer.
// - Revises the kind of an ID-like token,
// - Creates a token object and appends it to the end of the token stream.
//
// Return values:
//  <0  caller must stop work (fatal error),
//   0  caller must do some extra work,
//  +1  a new token has been appended into the token-stream
//
static int postProcess_ (t_lexer_ptr pThis) {
    int          textLen  = charstream_markedText_length(pThis->charStream);
    const char * thisText = charstream_markedText(pThis->charStream);

    // An ID-like word may actually be a keyword: re-query its kind.
    if (TK_ID == pThis->lastTokenKind)
        pThis->lastTokenKind = token_query_id_kind(thisText);

    // Recognized, but not supported by this implementation?
    if ( token_notSupport(pThis->lastTokenKind) ) {
        bool isKeyword = (TK_KW_MIN__ < pThis->lastTokenKind)
                      && (pThis->lastTokenKind < TK_TYPE_MAX__);
        EnumErrorCode errorCode =
            isKeyword ? EC_UNSUPPORT_KW : EC_UNSUPPORT_TOKEN;

        ERROR_REPORT_INPUT(pThis->inputFilePath,
                           pThis->currentPosition.line,
                           pThis->currentPosition.column,
                           errorCode,  thisText );
        pThis->lastTokenKind = TK_UNSUPPORT;
        pThis->currentPosition.column += textLen;

        return 0;  // don't put it into the token-stream
    }

    // Build the token at the position where its text begins.
    t_token_ptr newToken = token_new(thisText, pThis->lastTokenKind, pThis,
                                     pThis->currentPosition.line,
                                     pThis->currentPosition.column);
    pThis->currentPosition.column += textLen;
    if (NULL == newToken) {
        ERROR_REPORT_MALLOC();
        return -1;
    }

    tokenstream_append(pThis->tokenStream, newToken);

    {   // trace the freshly produced token
        t_position_ptr pos = token_get_position(newToken);
        log_trace("  got a token < %-15s , [%2d : %2d] , \"%s\" >",
                  token_get_kindName(newToken),
                  pos->line,            pos->column ,
                  token_get_text(newToken)    );
    }
    return 1;
}


// Consumes a line-separator.
// Returns true if ch1 started a line break, false otherwise.
static bool eatNewLine_ (t_lexer_ptr pThis, char ch1) {
    if ('\n' != ch1 && '\r' != ch1)
        return false;               // not a line separator at all

    POS_LINE_INC(pThis);

    // A <LF,CR> or <CR,LF> pair counts as ONE line break:
    // swallow the partner char when present, otherwise push it back.
    char partner = ('\n' == ch1) ? '\r' : '\n';
    char ch2 = READ_CHAR(pThis);
    if (partner != ch2)
        BACK_CHAR(pThis);
    return true;
}


// DISCARD the body and closing tag of a block-comment.
static void discardBlockComment_ (t_lexer_ptr pThis) {
    // Remember where the comment opened (the '/*' tag is 2 columns
    // back) so an unterminated comment is reported precisely.
    const int openLine = pThis->currentPosition.line;
    const int openCol  = pThis->currentPosition.column - 2;

    for (;;) {
        char c = READ_CHAR(pThis);

        if ( IS_EOL(c) ) {
            eatNewLine_(pThis, c);              // comments may span lines
        } else if ('*' == c) {
            POS_COL_INC(pThis);
            char next = READ_CHAR(pThis);
            if ('/' == next) {                  // closing tag '*/' found
                POS_COL_INC(pThis);
                return;
            }
            BACK_CHAR(pThis);                   // lone '*': keep scanning
        } else if (CHAR_EOF == c) {
            // The comment was never closed: report its opening position.
            ERROR_REPORT_INPUT(pThis->inputFilePath,
                               pThis->currentPosition.line,
                               pThis->currentPosition.column,
                               EC_BLOCKCOMMENT ,
                               openLine, openCol);
            return;
        } else {
            POS_COL_INC(pThis);                 // ordinary body char
        }
    }
}


// DISCARD the body of a line-comment.
static void discardLineComment_ (t_lexer_ptr pThis) {
    for (;;) {
        char c = READ_CHAR(pThis);
        if ( IS_EOL(c) ) {
            eatNewLine_(pThis, c);      // the comment ends at the break
            return;
        }
        if ( IS_EOF(c) ) {
            BACK_CHAR(pThis);           // leave EOF for the main loop
            return;
        }
        POS_COL_INC(pThis);             // comment body char: skip it
    }
}


// DISCARD the following chars until a white space char,
//         an end-of-line, or the end-of-file.
//
// This is a kind of "panic mode" recovery, the simplest strategy.
//
static void discardErrorChars_ (t_lexer_ptr pThis) {
    char printable[10];
    char c = READ_CHAR(pThis);

    // Report the first offending character at its exact column.
    ERROR_REPORT_INPUT(pThis->inputFilePath,
                       pThis->currentPosition.line,
                       pThis->currentPosition.column,
                       EC_CHAR, char2Cstring(printable, c) );

    // Throw characters away until a natural word boundary.
    for (;;) {
        if ( IS_SPACE(c) ) { POS_COL_INC(pThis);    return; }
        if ( IS_EOL(c)   ) { eatNewLine_(pThis, c); return; }
        if ( IS_EOF(c)   ) { BACK_CHAR(pThis);      return; }

        POS_COL_INC(pThis);
        c = READ_CHAR(pThis);
    }
}




// -----------------------------------------------------------------------
// Following is the concrete implementation of "How to recognize one token".
// There are 3 styles implementation, and they MUST be equivalent.
//
// NOTICE: When any branch of RECOGNIZE_TOKEN_STYLE is changed,
//         the lexer_style() in this file SHOULD be changed.
//


// The core task of recognizing a token:
//   Recognizes + Designates token's kind.
//
// The active implementation is selected at COMPILE time by the macro
// RECOGNIZE_TOKEN_STYLE; only style 1 is implemented here — styles 2
// and 3 intentionally fail the build via #error.
//
// Returned object:
// (1) wordInfo.n_scanedchars is the number of scanned characters.
// (2) wordInfo.kind is the kind of token.
//
static t_word_info recognize_token_core_ (t_word_info  wordInfo) {

#if     RECOGNIZE_TOKEN_STYLE == 1

    // Style 1: Simulate DFA based on transition table, see dfa.c

    wordInfo = dfa_tableDriven_recognize ( wordInfo );


#elif   RECOGNIZE_TOKEN_STYLE == 2

    // Style 2: Simulate DFA based on direct-code style.
    //  In this style,  a state transition is implemented
    //  using "goto" statements,  OR, structured programs below.
    //  ::: Programming in two-level-nested cases
    //  switch ( current_state ) {
    //  case 1:
    //      switch ( current_char ) {
    //      case ... :     next_state = ...
    //      }
    //  }

#error "NO implementation of RECOGNIZE_TOKEN_STYLE 2"

//    wordInfo = dfa_directCoding_recognize ( wordInfo );


#elif   RECOGNIZE_TOKEN_STYLE == 3

    // Style 3: Recursive-Descent style:  maybe no guidance by DFA.
    //
    // Programmers translate the lexical rule, aka Regular Expression,
    // to a complex, hierarchical control flow block
    // according to the first char of a word.

#error "Un-finished implementation of RECOGNIZE_TOKEN_STYLE 3"

    // This function is the un-finished work from WXQ#XDU

    wordInfo = recursive_descent_recognize ( wordInfo ); // see recursive.c


#else


#error "The macro RECOGNIZE_TOKEN_STYLE must be 1, 2, or 3"


#endif

    return wordInfo;
}


// Returns a human-readable description of the compiled-in recognizer
// style. The string is selected at COMPILE time and must stay in sync
// with the branches of recognize_token_core_() above.
const char * lexer_style() {

#if     RECOGNIZE_TOKEN_STYLE == 1

    return "Simulate DFA based on Transition-Table";

#elif   RECOGNIZE_TOKEN_STYLE == 2

    return "Simulate DFA based on Directly-Coding style";

#elif   RECOGNIZE_TOKEN_STYLE == 3

    return "Simulate DFA in Recursive-Descent style";

#else

    return "Bad value of RECOGNIZE_TOKEN_STYLE";

#endif

}

// End-Of-File
