// re.cpp
//
// Copyright (C) 2008  Maksim Sipos <msipos@mailc.net>
//
// This file is a part of the Ripe language project.
//
// Ripe is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

#include "standard.hpp"
#include <cstring>
#include <cstdio>
#include "util/re.hpp"
#include "util/mem.hpp"
#include "util/nfa.hpp"
#include "adts/vector.hpp"

namespace Ripe {

// Possible REToken types (one per lexical element of a regular expression):
enum RETokenType {
  TypeNFA,      // an already-compiled sub-expression (literal, '.', or group)
  TypeStar,     // postfix '*' operator
  TypePlus,     // postfix '+' operator
  TypeQuestion, // postfix '?' operator
  TypeLParen,   // '('
  TypeRParen,   // ')'
  TypeEOF,      // sentinel appended after the last real token
};

// One token of a tokenized regular expression.
struct REToken {
  RETokenType type;
  NFAChunk chunk; // Used only if type == TypeNFA
};

// Builders of elementary NFAChunks (one chunk per regex atom).
static NFAChunk re_make_any();                // '.' -- matches any character
static NFAChunk re_make_char(UniChar letter); // a single literal character

// Parses the list of tokens starting at start_idx. The section that is
// parsed ends at the first token of type end_type; the index of that
// terminating token is written to *end_idx. Throws ErrorInvalidRE on
// malformed input.
static NFAChunk re_parse(Vector<REToken>* tokens, uint32_t start_idx, 
                         RETokenType end_type, uint32_t* end_idx);

// Compiles the regular expression in str into an NFA whose accepting
// state carries term_id.  Returns the entry node of the NFA.
// Throws ErrorInvalidRE if str is empty or malformed.
NFANode* re_compile(const char* str, int term_id)
{
  assert(str != NULL);
  ErrorInvalidRE err;
  const char* cursor = str;
  const char* limit = str + std::strlen(str);

  // Reject the empty regular expression up front.
  if (cursor == limit){
    err.message = "Empty string is not a valid regular expression.";
    throw err;
  }

  // Phase 1: tokenize the regular expression, one token per character
  // (operators and parentheses map to their token types; everything else
  // becomes a one-character NFA chunk).
  Vector<REToken> tokens;
  while (cursor != limit){
    UniChar c = read_uni_char(&cursor, limit);

    REToken tok;
    if (c == '+'){
      tok.type = TypePlus;
    } else if (c == '*'){
      tok.type = TypeStar;
    } else if (c == '?'){
      tok.type = TypeQuestion;
    } else if (c == '('){
      tok.type = TypeLParen;
    } else if (c == ')'){
      tok.type = TypeRParen;
    } else if (c == '.'){
      tok.type = TypeNFA;
      tok.chunk = re_make_any();
    } else {
      tok.type = TypeNFA;
      tok.chunk = re_make_char(c);
    }
    tokens.append(tok);
  }

  // Terminate the token stream with an EOF sentinel for the parser.
  REToken sentinel;
  sentinel.type = TypeEOF;
  tokens.append(sentinel);

  // Phase 2: recursively parse the whole token list.
  uint32_t end_idx;
  NFAChunk chunk = re_parse(&tokens, 0, TypeEOF, &end_idx);

  // Phase 3: route every dangling output into the terminating node that
  // identifies this rule.
  NFATermNode* term_node = new NFATermNode;
  term_node->id = term_id;
  chunk.route(term_node);

  return chunk.input;
}

// Builds a single-state chunk for '.', whose one transition matches any
// character.  The state is both the chunk's input and its only dangling
// output.
NFAChunk re_make_any()
{
  NFACharNode* state = new NFACharNode;
  state->negative = false;

  NFAMatchingClass any_class;
  any_class.type = ClassAny;
  state->match_classes.append(any_class);

  NFAChunk result;
  result.input = state;
  result.add_output(state);
  return result;
}

// Builds a single-state chunk that matches exactly one literal character.
// The state is both the chunk's input and its only dangling output.
NFAChunk re_make_char(UniChar letter)
{
  NFACharNode* state = new NFACharNode;
  state->negative = false;

  NFAMatchingClass single_class;
  single_class.type = ClassSingle;
  single_class.extra1 = letter;
  state->match_classes.append(single_class);

  NFAChunk result;
  result.input = state;
  result.add_output(state);
  return result;
}

// Kleene star (zero or more repetitions of expr): a split node that may
// enter expr, with expr's outputs looping back into the split.  The split
// itself is the chunk's entry and its only dangling output, so the whole
// expression may also be skipped entirely.
NFAChunk re_make_star(NFAChunk expr)
{
  NFASplitNode* split = new NFASplitNode;
  split->route(expr.input); // branch into expr...
  expr.route(split);        // ...and loop back after matching it

  NFAChunk result;
  result.input = split;
  result.add_output(split);
  return result;
}

bool re_match(const char* s, NFANode* pattern, bool greedy, int* id, 
              int* length)
{
  const char* start = s;
  const char* end = s + strlen(s);

  NFAState state;
  pattern->activate(&state);

  // These are used when greedy = true
  bool matched = false;
  int matched_len = 0, matched_id = 0;

  while (s != end){
    UniChar letter = read_uni_char(&s, end);
    state.step(letter);
    if (not state.any()){
      // Break out of the loop since no more active states.
      break;
    }

    // Go through the active states and find if any are terminating. Match
    // the id with the highest id among the terminating states. This is
    // needed for the operation of the lexer. This means that the rule that
    // was added the latest will get precedence if it matches the same text
    // like an older rule.
    int max_id = -1;
    for (unsigned int i = 0; i < state.active_nodes.size; i++){
      NFANode* node = state.active_nodes[i];

      if (node->terminating()){
        NFATermNode* term_node = (NFATermNode*) node;
        int this_id = term_node->id;

        if (this_id > max_id){
          max_id = this_id;
          matched_len = s - start;
          matched_id = this_id;
          // Note the match and continue processing
          matched = true;
        }
      } // if node is terminating
    } // for node in active nodes

    if (matched and not greedy){
      *id = matched_id;
      *length = matched_len;
      return true;
    }
  } /* while (s != end) */

  if (matched){
    *id = matched_id;
    *length = matched_len;
    return true;
  }
  return false;
}

// One or more repetitions of expr ('+'): expr is entered first, then a
// split node either loops back into expr or exits.  Entry is expr itself,
// so at least one match of expr is required; the split is the only
// dangling output.
static NFAChunk re_make_plus(NFAChunk expr)
{
  NFASplitNode* split = new NFASplitNode;
  expr.route(split);        // after matching expr, reach the split...
  split->route(expr.input); // ...which may loop back for another match

  NFAChunk chunk;
  chunk.input = expr.input; // must pass through expr at least once
  chunk.add_output(split);  // ...or leave through the split afterwards
  return chunk;
}

// Zero or one repetition of expr ('?'): a split node either enters expr
// or skips it.  The chunk dangles both expr's own outputs (copied with
// the chunk) and the skip branch of the split.
static NFAChunk re_make_question(NFAChunk expr)
{
  NFAChunk chunk = expr;    // inherit expr's dangling outputs
  NFASplitNode* split = new NFASplitNode;
  split->route(expr.input); // branch 1: match expr once
  chunk.input = split;
  chunk.add_output(split);  // branch 2: skip expr entirely
  return chunk;
}

// Parses tokens[start_idx ...] up to the first token of type end_type,
// whose index is written to *end_idx.  Returns the NFA chunk for the
// parsed section.  Works in three passes:
//   1. Flatten parenthesized groups (recursing on '(') so every token is
//      either an NFA chunk or a postfix operator.
//   2. Apply the postfix operators *, + and ? to the chunk on their left.
//   3. Concatenate the remaining chunks left to right.
// Throws ErrorInvalidRE on malformed input (unbalanced parentheses, empty
// groups, operators without a preceding expression).
NFAChunk re_parse(Vector<REToken>* tokens, uint32_t start_idx, 
                  RETokenType end_type, uint32_t* end_idx)
{
  ErrorInvalidRE err;
  uint32_t i = start_idx;
  uint32_t end_i;
  REToken tok;

  // Pass 1: resolve parenthesized groups into single NFA tokens.
  Vector<REToken> temp_tokens;
  for(;;) {
    tok = tokens->get(i);
 
    // Handle end tokens
    if (tok.type == TypeEOF) {
      if (end_type != TypeEOF) {
        err.message = "Unbalanced parantheses. Expected ')'.";
        throw err;
      } else {
        *end_idx = i;
        break;
      }
    }
    if (tok.type == TypeRParen) {
      if (end_type != TypeRParen) {
        err.message = "Unbalanced parantheses. Unexpected ')'.";
        throw err;
      } else {
        *end_idx = i;
        break;
      }
    }
    
    NFAChunk chunk;
    REToken new_token;
    switch (tok.type){
      case TypeLParen:
        // Recurse on the group; resume after its closing ')'.
        chunk = re_parse(tokens, i+1, TypeRParen, &end_i);
        i = end_i;
        // Record the information from re_parse.
        new_token.type = TypeNFA;
        new_token.chunk = chunk;
        temp_tokens.append(new_token);
        break;
      case TypeNFA:
      case TypeStar:
      case TypePlus:
      case TypeQuestion:
        temp_tokens.append(tok);
        break;
      case TypeRParen:
      case TypeEOF:
        assert_never();
        break;
    }
    i++;

  } // for(;;)

  if (temp_tokens.size == 0) {
    if (end_type == TypeEOF) {
      err.message = "Empty string is an invalid regular expression.";
      throw err;
    } else if (end_type == TypeRParen) {
      err.message = 
        "Empty parentheses '()' are not allowed in a regular expression.";
      throw err;
    } else assert_never();
  }
  
  // Vector copy
  Vector<REToken>* cur_tokens = new Vector<REToken>(&temp_tokens);

  // Pass 2: handle the *, + and ? operators.  Each operator consumes the
  // NFA token directly before it and replaces both with a new NFA token.
  // (BUGFIX: previously only '*' produced a chunk; '+' and '?' appended a
  // TypeNFA token whose chunk was left uninitialized -- undefined
  // behavior.)
  {
    REToken prev_tok;
    REToken new_tok;
    Vector<REToken>* processed_tokens = new Vector<REToken>;
    for (i = 0; i < cur_tokens->size; i++){
      tok = cur_tokens->get(i);
      if (tok.type == TypeStar or tok.type == TypePlus
           or tok.type == TypeQuestion){

        // String representation of this operator (for error messages):
        const char* s_op = NULL;
        s_op = (tok.type == TypeStar) ? "*" : s_op;
        s_op = (tok.type == TypePlus) ? "+" : s_op;
        s_op = (tok.type == TypeQuestion) ? "?" : s_op;

        if (processed_tokens->size == 0) {
          err.message.format("'%s' operator must be preceded by a valid "
                             "expression.", s_op);
          delete cur_tokens;
          delete processed_tokens;
          throw(err);
        }

        prev_tok = processed_tokens->pop();
        if (prev_tok.type != TypeNFA) {
          err.message.format("'%s' operator must be preceded by a valid "
                             "expression.", s_op);
          delete cur_tokens;
          delete processed_tokens;
          throw err;
        }

        new_tok.type = TypeNFA;
        switch (tok.type) {
          case TypeStar:
            new_tok.chunk = re_make_star(prev_tok.chunk);
            break;
          case TypePlus:
            new_tok.chunk = re_make_plus(prev_tok.chunk);
            break;
          case TypeQuestion:
            new_tok.chunk = re_make_question(prev_tok.chunk);
            break;
          default:
            assert_never();
        }
        processed_tokens->append(new_tok);
      } // tok.type = '*' | '?' | '+'
      else {
        processed_tokens->append(tok);
      }
    }
    delete cur_tokens;
    cur_tokens = processed_tokens;
  }

  // Pass 3: concatenate the remaining chunks left to right.  (BUGFIX:
  // previously a single token was popped here, silently dropping every
  // other chunk of a multi-atom expression such as "ab".)
  tok = cur_tokens->get(0);
  if (tok.type != TypeNFA){
    delete cur_tokens;
    err.message = "Operator used improperly.";
    throw(err);
  }
  NFAChunk result = tok.chunk;
  for (i = 1; i < cur_tokens->size; i++){
    tok = cur_tokens->get(i);
    if (tok.type != TypeNFA){
      delete cur_tokens;
      err.message = "Operator used improperly.";
      throw(err);
    }
    // Wire the dangling outputs of what we have so far into the next
    // chunk, then adopt the next chunk's dangling outputs as our own.
    result.route(tok.chunk.input);
    NFAChunk joined = tok.chunk;
    joined.input = result.input;
    result = joined;
  }
  delete cur_tokens;
  return result;
}

}
