#include "constants.h"
#include "preprocessor.h"
#include "misclib.h"
#include "model.h"
#include <string>
#include <vector>
#include <map>
#include <stdarg.h>

using namespace std;


/*---------------------------------------------------------------------------------------- 

  Definition of the preprocessor class :

  ---------------------------------------------------------------------------------------- */
class preprocessor::local { // define local static functions whose declaration does not need
  // to be globally available.
  public:
    //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    // printf-style error helper: formats the message and throws a
    // cgcmexception carrying it.  This function never returns normally.
    static void error(const char * fmt, ...){
      va_list args;
      va_start(args, fmt);
      string format(fmt);
      string msg;
      stdvsprintf(msg, format.c_str(), args);
      va_end(args); // BUG FIX: va_end was never called before the throw
      throw cgcmexception(msg, -1);
    }
    //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    // Extract the next token from 'input' starting at 'pos'; advances 'pos'
    // and keeps 'line_number' up to date.  Returns false only when the input
    // is exhausted.  NOTE: skipping over a comment returns true with an
    // EMPTY token, so callers must discard tokens whose size() is zero.
    static bool extract_token(const string & input, int & pos, int & line_number, token & output)
    {
      // spaces ' ', carriage returns '\n', and tabs '\t' are ignored
      // all other elements, except what is inside a comment is tokenized
      static const string uni_tokens(gdb_TOKENIZE_SINGLE_TOKEN_LIST); // single elements that form tokens
      static const string multi_tokens[] = gdb_TOKENIZE_MULTI_TOKEN_LIST;
      static const string space_tokens(" \t\n");
      static const char line_delimiter = '\n';
      /* typedefs */
      struct comment_pair { // start/end delimiter pair for one comment style
        string start;
        string end;
        comment_pair(const char * s1, const char * s2){
          this->start = s1;
          this->end   = s2;
        }
      };
      static comment_pair cp[2] = { comment_pair("/*", "*/"), comment_pair("//", "\n") };
      // reset the output token
      output.word.clear();
      output.namelist_number = -1;
      output.line_number = -1;

      int mspos = 0; // multicharacter token start position
      // ignore space characters, increment line if need
      while( ( pos < static_cast<int>(input.size()) ) && ( space_tokens.find_first_of(input[pos]) != string::npos ) ){
        if( input[pos] == line_delimiter ) line_number++;
        pos++;
      }
      // skip comments: on finding one, consume it entirely and return an
      // empty token (see NOTE in the header comment)
      for(int i = 0; i < 2; i++){
        const int cpsize = cp[i].start.size();
        const int cpesize = cp[i].end.size();
        if( pos+cpsize <= static_cast<int>(input.size()) && input.substr(pos, cpsize) == cp[i].start ){
          const int sline = line_number;
          pos+=cpsize;
          while(true){
            // NOTE(review): a "//" comment on the very last line of a file
            // without a trailing newline also trips this error ("\n" is its
            // end marker) — confirm namelist files are newline terminated.
            if( pos+cpesize > static_cast<int>(input.size()) ) error("Unterminated comment beginning on line %i.", sline);
            if( input[pos] == line_delimiter ) line_number++;
            if( input.substr(pos, cpesize) == cp[i].end ){ pos+=cpesize;  return true; }
            pos++;
          }
        }
      }
      // extract a double-quoted string if present; \" escapes are honored
      if( pos < static_cast<int>(input.size()) && input[pos] == '"'){
        output.line_number = line_number;
        const int sline = line_number;
        mspos = pos; pos++;
        while( ! (input[pos] == '"' && input[pos-1] != '\\' ) ){
          if( input[pos] == line_delimiter ) line_number++;
          pos++;
          if(pos >= static_cast<int>(input.size()) ) error("Unterminated string beginning on line %i.", sline);
        }
        output.word = input.substr(mspos, pos - mspos + 1); // keep the quotes
        output.type = token::STRING;
        pos++;
        return true;
      }
      // attempt to find two-character multi token identifiers
      // BUG FIX: the old test 'pos <= input.size()-2' underflowed (size_t
      // arithmetic) for inputs shorter than two characters and then indexed
      // out of bounds.
      if( pos + 2 <= static_cast<int>(input.size()) ){
        for(int i = 0; i < gdb_TOKENIZE_MULTI_TOKEN_LIST_SIZE; i++){
          const string & multiToken = multi_tokens[i];
          if( input[pos] == multiToken[0] && input[pos+1] == multiToken[1] ){
            output.line_number = line_number;
            output.word = multi_tokens[i];
            output.type = token::MULTI;
            pos+=2;
            return true;
          }
        }
      }

      // find single unitoken identifiers
      if( pos < static_cast<int>(input.size()) && uni_tokens.find_first_of(input[pos]) != string::npos ){
        output.line_number = line_number;
        output.word = input.substr(pos,1);
        output.type = token::SINGLE;
        pos++;
        return true;
      }
      // word token: any run of characters that is not a uni token, space,
      // comment opener, or string
      if( pos < static_cast<int>(input.size()) ){
        const int wstart = pos; pos++; // no shadowing of the outer mspos
        while(true){
          if( pos >= static_cast<int>(input.size()) ) break;
          //   if( input[pos] == line_delimiter ) line_number ++; (this will get updated next time)
          if( uni_tokens.find_first_of(input[pos]) != string::npos ) break;
          if( space_tokens.find_first_of(input[pos]) != string::npos ) break;
          bool comment_ahead = false;
          for( int i = 0; i < 2; i++){ // check for comments
            const int cpsize = cp[i].start.size();
            if( pos + cpsize < static_cast<int>(input.size()) && ( input.substr(pos, cpsize) == cp[i].start  )){ comment_ahead = true; break; }
          }
          // BUG FIX: the old code 'break'-ed inside the for-loop above, which
          // never terminated this while-loop, so words ran straight through
          // comment openers.
          if( comment_ahead ) break;
          pos++;
        }
        // we save from wstart to pos - 1 (do not include the character that caused the loop to break)
        output.line_number = line_number;
        output.word = input.substr(wstart, pos-wstart);
        output.type = token::WORD;
        return true; // do not increment pos here
      }
      return false; // nothing was found or we are done
    }
    //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    // Tokenize the file content held by 'ndata' into ndata.tokens, expanding
    // $MACRO references from pdata's macro table as they are encountered.
    // NOTE(review): a '$name' whose macro is undefined is silently dropped
    // from the stream — confirm this is the intended behavior.
    static void tokenizenamelist(preprocessordata & pdata, namelistdata & ndata)
    {
      const string & sfiledata = ndata.filecontent;
      vector<token> & tokens = ndata.tokens;
      int line_number = 1;
      int pos = 0;
      token etoken;
      while( extract_token(sfiledata, pos, line_number, etoken)){
        if( etoken.size() > 0 ){ // could be a macro
          if(etoken.word[0] == '$'){
            if(etoken.size() > 1){
              string macroid = etoken.word.substr(1, etoken.size()-1);
              if( pdata.doesmacroexist(macroid) ){
                // substitute the macro's stored token list in place
                const token_list & macro_tokens = pdata.getmacrobyname( macroid );
                for(size_t iter = 0; iter < macro_tokens.size(); iter++){
                  etoken.word = macro_tokens[iter];
                  etoken.type = token::WORD;
                  etoken.namelist_number = pdata.namelists.size() - 1;
                  tokens.push_back( etoken );
                }
              }
            }
          }else{
            etoken.namelist_number = pdata.namelists.size() - 1;
            tokens.push_back( etoken );
          }
        }
      }
    }
    //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    // Split a colon-separated directory list (PATH style) into individual
    // entries appended to 'pathlist', guaranteeing each ends with '/'.
    // Empty segments are skipped.
    static void getnamelistpaths(const char * pathstr, vector<string> & pathlist)
    {
      size_t spos = 0;
      size_t epos = 0;
      string path(pathstr); // convert to string ... better
      while(true){
        // test the length first so we never index path[path.size()]
        if( epos == path.size() || path[epos] == ':' ){
          // BUG FIX: the old test 'epos-1-spos > 0' underflowed (size_t) on
          // empty segments — which then indexed an empty string below — and
          // wrongly skipped single-character directory names.
          if( epos > spos ){
            pathlist.push_back(path.substr(spos, epos-spos));
            if(pathlist.back()[pathlist.back().size()-1] != '/'){
              pathlist.back() += string("/");
            }
          }
          spos = epos+1;
          if(epos >= path.size()) break;
        }
        epos++;
      }
    };
    //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    // Locate 'fname' in the current directory, the configured include search
    // directories, and the directories named by the include-path environment
    // variable; open it into 'ndata' and tokenize it.  Throws (via error)
    // when the file cannot be found or opened.
    static void open_new_namelist(model_class & mo, preprocessordata & pdata, namelistdata & ndata, const char * fname)
    {
      vector<string> paths;
      paths.push_back("./"); // search the current directory first
      getnamelistpaths( mo.get_global_param_by_name(gdb_str_INCLUDE_SEARCH_DIR).getsValue(), paths );
      const char * env_var_name = mo.get_global_param_by_name(gdb_str_INCLUDE_SEARCH_DIR_ENV_VAR_NAME).getsValue();
      const char * env_paths = getenv(env_var_name);
      if( env_paths != NULL ){
        getnamelistpaths( env_paths, paths );
      }
      // try each candidate directory until one opens
      bool open = false;
      size_t iter = 0;
      while( ! open && iter < paths.size() ){
        string fsname = paths[iter] + fname;
        open = ndata.open( fsname.c_str() );
        iter++;
      }
      if( !open ){ // failed to find or open the requested file
        fprintf(stderr, "The file '%s' cannot be found.\n", fname);
        fprintf(stderr, "Try defining environment variable '%s' to include the file's path.\n", env_var_name);
        error("Could not open file '%s'.\n", fname);
      }
      tokenizenamelist(pdata, ndata);
    }
    //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    // true when 'keyword' is one of the recognized preprocessor directives
    static bool isprekeyword(const string & keyword){
      return matchkeyword(keyword,
          gdb_KEYWORD_PREPROC_BREAKPOINT,
          gdb_KEYWORD_PREPROC_END,
          gdb_KEYWORD_PREPROC_INCLUDE,
          gdb_KEYWORD_PREPROC_INCLUDE_PRE,
          gdb_KEYWORD_PREPROC_DEFINE_PRE
          );
    };
    //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    // Rewrite a defined(SYMBOL) construct at tokens[pos] into the literal
    // tokens "(" "true"/"false" ")" depending on whether SYMBOL is defined in
    // the model.  On success 'pos' is left on the closing ')' (the caller's
    // increment steps past it) and true is returned; otherwise nothing is
    // consumed and false is returned.
    static bool process_define_macro(model_class & mo, const vector<token> & tokens, int & pos, token_list & output){
      if( tokens[pos].word == "defined" ){
        if( pos+3 < static_cast<int>(tokens.size()) ){
          if( tokens[pos+1].word == "(" ){
            const string & symbol = tokens[pos+2].word;
            if( ! isVariableLegal( symbol.c_str() ) || tokens[pos+3].word != ")"){
              error("'defined(...)' macro takes a single word as its argument but instead was processed on \"%s\".", symbol.c_str());
            }
            output.push_back("(");
            output.push_back( mo.issymboldefined(symbol.c_str()) ? "true" : "false" );
            output.push_back(")");
            pos = pos+3; // sit on the ')'
            return true;
          }
        }
      }
      return false;
    }
    //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    // Execute the preprocessor directive whose keyword sits at tokens[pos],
    // advancing 'pos' over any arguments it consumes.
    // NOTE(review): gdb_KEYWORD_PREPROC_DEFINE_PRE is accepted by
    // isprekeyword but has no branch here — confirm where it is handled.
    static void readpreprocdirective( preprocessordata & pdata, namelistdata & ndata, int & pos){
      model_class & mo = *(pdata.mo);
      const vector<token> & tokens = ndata.tokens;
      const int epos = tokens.size() - 1;
      const string & keyword = tokens[pos].word;
      const int opos = pos; // where the directive started (for error reports)
      if(keyword == gdb_KEYWORD_PREPROC_BREAKPOINT){ // used for debugging ONLY
        fprintf(stderr, "PREPROCESSOR BREAKPOINT:::\n");
#warning "Add preprocessor breakpoint processing."
        //breakpoint();  // set up a break point parser
        //runbreakpointloop(mo);
      }else if(keyword == gdb_KEYWORD_PREPROC_END){ //  end all processing from now on
        pos = epos;
        return ;
      }else if(keyword == gdb_KEYWORD_PREPROC_INCLUDE || keyword == gdb_KEYWORD_PREPROC_INCLUDE_PRE){
        token_list fileExpr;
        get_preproc_arg(mo, fileExpr, tokens, pos, true);
        if( fileExpr.size() == 0 ){
          error("No filename provided to '%s' directive.\n", keyword.c_str());
        }else{
          string file = convert_expr_to_string(fileExpr);
          try{
            pdata.readnamelist( file.c_str() ); // recursive include
          }catch(const cgcmexception & err){
            error("Preprocessor Error: %s\n'%s' failed on line '%i' of namelist file '%s'.\n", err.what(),
                keyword.c_str(), tokens[opos].line_number, ndata.filename.c_str());
          }
        }
      }
    };
    //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    // Walk the token stream of 'ndata': lines starting with a preprocessor
    // keyword are executed as directives, every other token is appended to
    // pdata.processed_namelist.  Always returns true.
    bool static preprocessfile(preprocessordata & pdata, namelistdata & ndata)
    {
      const vector<token> & tokens = ndata.tokens;
      int pos = 0;
      while(pos < static_cast<int>(tokens.size())){
        // a token opens a line when it is the first token overall, or its
        // line number exceeds its predecessor's
        bool firsttokeninline = (pos == 0 || (tokens[pos].line_number - tokens[pos-1].line_number > 0));
        if(firsttokeninline && isprekeyword(tokens[pos].word)){
          readpreprocdirective(pdata, ndata, pos);
        }else{ // ordinary token: pass it through to the processed stream
          pdata.processed_namelist.push_back( tokens[pos] );
        }
        pos++;
      }
      return true;
    }
    //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
}; // end local component of preprocessor static class


    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // Global Function
    bool extract_words(const string & input, mup::token_list & output)
    {
      vector<token> buffer;
      preprocessor::tokenizestring(input, buffer);
      preprocessor::combinetokens(buffer, output);
      return output.size() > 0;
    }

    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // Collect the argument tokens of a preprocessor directive into 'output'.
    // On entry tokens[pos] is the directive keyword; on exit pos is left on
    // the last token consumed (the caller's loop increments past it).  The
    // argument ends at a ';', at the end of the directive's line (unless a
    // '\' continuation was seen), at a token from a different namelist, or at
    // the end of the token stream.  defined(SYMBOL) constructs are expanded
    // via local::process_define_macro.  When 'removquotes' is set, STRING
    // tokens are stored without their surrounding quotes.
    void preprocessor::get_preproc_arg(model_class & mo, token_list & output, const vector<token> & tokens, 
        int & pos, bool removquotes)
    {
      //const static string SPACE(" ");
      const int slinenumber = tokens[pos].line_number; // line the directive starts on
      const int nid = tokens[pos].namelist_number;     // namelist the directive belongs to
      bool ignorelinebreak = false; // set by a '\' continuation token
      pos++; // march past the keyword
      if( pos >= tokens.size()) return;
      while( true ){
        // stop (backing up one token) when we run off the directive's line...
        if( !ignorelinebreak && tokens[pos].line_number > slinenumber ){ pos--; break; }
        if( ignorelinebreak && tokens[pos].line_number > slinenumber ){ ignorelinebreak = false; }
        // ...or into tokens that came from a different namelist file...
        if( nid != tokens[pos].namelist_number ){ pos --; break; }
        // ...or back onto an earlier line
        if( tokens[pos].line_number < slinenumber ){ pos --; break; } // nested include possible here
        if( tokens[pos].word == ";" ){ break; } // explicit terminator (consumed)
        else if( tokens[pos].word == "\\" ){ ignorelinebreak = true; } // line continuation
        else if( ! local::process_define_macro(mo, tokens, pos, output) ){
          switch( tokens[pos].type ){
            case token::WORD :
              //output += (SPACE + tokens[pos].word + SPACE);
              output.push_back( tokens[pos].word );
              break;
            case token::STRING :
              if(removquotes){
                output.push_back( removequotes(tokens[pos].word) ); // += removequotes(tokens[pos].word);
              }else{
                output.push_back( tokens[pos].word ); // += tokens[pos].word;
              }
              break;
            default:
              output.push_back( tokens[pos].word ); // += tokens[pos].word;
          };     
        }
        pos++;
        if( pos >= tokens.size() ){ pos--;  break; }
      }
    } 
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // Append the words of tokens[spos..epos] (inclusive) onto 'output'.
    void preprocessor::combinetokens(const vector<token> & tokens, token_list & output, int spos, int epos){
      // cleanup: removed the unused secondary counter 'i' from the loop
      for(int pos = spos; pos <= epos; pos++){
        output.push_back(tokens[pos].word);
      }
    };
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // Convenience overload: combine every token in the vector into 'output'.
    void preprocessor::combinetokens(const vector<token> & tokens, token_list & output){
      const int last = static_cast<int>(tokens.size()) - 1;
      preprocessor::combinetokens(tokens, output, 0, last);
    }
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    void preprocessor::tokenizestring(const string & input, vector<token> & output)
    {
      int line_number = 1;
      int pos = 0;
      token etoken;
      while( local::extract_token(input, pos, line_number, etoken) ){
        if( etoken.size() > 0 ){
          output.push_back( etoken );
        }
      }
    }
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // Entry point: read and preprocess 'namelist'.  Converts a thrown
    // cgcmexception into a stderr message and a false return value.
    bool preprocessor::invoke(preprocessordata & pdata, const char * namelist)
    {
      try {
        pdata.readnamelist( namelist );
      }catch(const cgcmexception & err){
        fprintf(stderr, "%s", err.what());
        return false;
      }
      return true;
    };
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */

    /* --------------------------------------------------------------------------------

       Definition of the preprocessordata class :

       ------------------------------------------------------------------------------- */

    /* - - - - - - - - - - - - - - - - - - - - - - 
       namelistdata                            */ 
    // Default constructor: the string/vector members initialize themselves.
    namelistdata::namelistdata()
    {}

    bool namelistdata::open(const char * fname)
    {
      FILE * infile = fopen(fname, "r");
      if(!infile) return false;
      fseek(infile, 0, SEEK_END);
      unsigned long int infileSize = ftell(infile);
      rewind(infile);
      char * filedata = new char[infileSize+1];
      if(fread(filedata, sizeof(char), infileSize, infile) != infileSize){
        fprintf(stderr, "Warning: Error reading namelist input file.\n");
      }
      filedata[infileSize] = 0;   // null terminate the string!
      this->filecontent.assign(filedata); // convert char * to string
      delete [] filedata;         // delete dynamically allocated memory.
      return true; // success 
    }
    /*     namelistdata     
           - - - - - - - - - - - - - - - - - - - - - - */

    /* - - - - - - - - - - - - - - - - - - - - - - 
       token                                  */ 
    // Default token: empty word with sentinel position/namelist values.
    token::token()
    {
      type = NONE;
      line_number = -1;
      namelist_number = -1;
    }

    // Fully specified token: word, source line, token type and namelist id.
    token::token(const string & value, const int ln, ttype tp, int nl){
      word = value;
      line_number = ln;
      type = tp;
      namelist_number = nl;
    }
    /*      end token     
            - - - - - - - - - - - - - - - - - - - - - - */
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // Default constructor: no model attached yet.
    preprocessordata::preprocessordata()
    {
      mo = NULL;
    }
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // Construct bound to an existing model.
    preprocessordata::preprocessordata(model_class * model)
      : mo(model)
    {}
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    void preprocessordata::readnamelist(const char * namelist)
    {
      // First check to see if the namelist file has been opened before
      // if it has, then fetch the token list for that file
      int namelist_id = -1;
      for(size_t i = 0; i < namelists.size(); i++){
        if(namelists[i]->filename == namelist){
          namelist_id = static_cast<int>(i); 
          this->current_namelist_index.push(i);
          break;
        }
      }
      if( namelist_id < 0 ){
        this->namelists.push_back( new namelistdata() );
        namelist_id = namelists.size() - 1;
        this->current_namelist_index.push(namelist_id);
        this->namelists[ namelist_id ]->filename.assign(namelist);
        preprocessor::local::open_new_namelist(*mo, *this, *(namelists.back()), namelist);
      }

      if(!preprocessor::local::preprocessfile( *this, *(namelists[namelist_id]) )){
        preprocessor::local::error("Error preprocessing file '%s'.", namelist, -10);
      }
      this->current_namelist_index.pop();
    }
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // Remove the macro named 'name'; reports whether anything was erased.
    bool preprocessordata::undefinemacro(const string & name)
    {
      const size_t nremoved = this->macrolist.erase(name);
      return nremoved > 0;
    }
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    bool preprocessordata::definemacro(const string & mname, const token_list & value)
    {
      this->macrolist.insert( pair< string, token_list >( mname, value) );
      return true;
    }
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    bool preprocessordata::definemacro(const string & mname, const string & value)
    {
      token_list expr;
      extract_words(value, expr);
      this->definemacro(mname, expr);
      return true;
    }
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // Look up a macro that is expected to exist; a missing name is a fatal
    // programming error (callers should check doesmacroexist first).
    const token_list & preprocessordata::getmacrobyname(const string & name) const
    {
      map< string, token_list >::const_iterator entry = this->macrolist.find(name);
      if( entry == this->macrolist.end() ){
        // line below will terminate program
        fatal_stop("Macro not found by lookup function getmacrobyname ....");
      }
      return entry->second;
    }
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // true when 'name' has an entry in the macro table
    bool preprocessordata::doesmacroexist(const string & name) const
    {
      return this->macrolist.count(name) != 0;
    };
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // Read-only access to the entire macro table.
    const map< string, token_list > & preprocessordata::getmacrolist() const 
    {
      return macrolist;
    }
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // Parse a macro definition list of the form
    //   NAME1 = expr1 , NAME2 = expr2 , ...
    // registering each NAME with its tokenized expression.  Returns true on
    // success; an illegal macro name calls error() (which throws), so the
    // 'return false' below is effectively unreachable.
    bool preprocessordata::addmacrolist( const string & defineexpr)
    {
      vector<token> tokens;
      preprocessor::tokenizestring(defineexpr, tokens);
      int i = 0;
      int spos = -1; // first token index of the current expression (-1: '=' not seen yet)
      int epos = -1; // last token index of the current expression
      const string * macroname = NULL; // name currently being defined
      while(i  < static_cast<int>(tokens.size())){
        if(macroname == NULL){
          // the first token of each definition is the macro name
          macroname = &(tokens[i].word);
        }else{
          if( spos < 0 && tokens[i].word == "=" ){
            spos = i+1; // the expression starts right after the '='
          }else if( spos >= 0 && ( tokens[i].word == "," || i == static_cast<int>(tokens.size()) - 1 ) ){
            // a ',' (or the end of input) closes the current expression
            epos = ( tokens[i].word == "," ) ? i - 1 : i;
            token_list macro_expr;
            preprocessor::combinetokens(tokens, macro_expr, spos, epos);
            if(isVariableLegal( macroname->c_str() ) ){
              this->definemacro(* macroname, convert_token_list_to_string(macro_expr) );
            }else{
              preprocessor::local::error( "Illegal macro name: '%s'\n", macroname->c_str() );
              return false;
            }
            // reset state for the next NAME = expr pair
            spos = -1;
            epos = -1;
            macroname = NULL;
          }
        }
        i++;
      }  
      return true;
    }
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
#ifdef DEBUG
    // Debug dump: one line per processed token, prefixed with the token's
    // originating namelist file and source line number.
    void preprocessordata::dumptokens() const {
      FILE * rout = getrout();
      const vector<token> & tokens = this->processed_namelist;
      const int tsize = tokens.size();
      for(int pos = 0; pos < tsize; pos++){
        string namelist("unknown");
        const int nid = tokens[pos].namelist_number;
        if( nid >= 0 && nid < this->namelists.size() ) namelist = this->namelists[nid]->filename;
        fprintf(rout, "%s:%i\t%s\n", namelist.c_str(), tokens[pos].line_number, tokens[pos].word.c_str());
      }
    };
    /* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
    // Debug dump of the processed token stream, re-grouped one source line
    // per output line, prefixed with the originating namelist file and the
    // source line number.
    void preprocessordata::dump() const {
      FILE * rout = getrout();
      const vector<token> & tokens = this->processed_namelist;
      const int tsize = tokens.size();
      int pos = 0;
      while ( pos < tsize ){
        int linenumber = tokens[pos].line_number;
        int spos = pos; // first token of this source line
        int epos = pos; // advanced below to the last token of this source line
        while( true ){
          if( epos >= tsize ) { epos--; break; }
          if( tokens[epos].line_number > linenumber ){ epos--; break; }
          epos++;
        }
        token_list line;
        string namelist("unknown");
        int nid = tokens[spos].namelist_number;
        // map the namelist index back to a filename when it is valid
        if( nid >= 0 && nid < this->namelists.size() ) namelist = this->namelists[nid]->filename;
        preprocessor::combinetokens(tokens, line, spos, epos);
        fprintf(rout, "%s:%i %s\n", namelist.c_str(), linenumber, convert_token_list_to_string(line).c_str());
        pos=epos+1; // continue with the first token of the next line
      }
    };

#endif
