#include <./util.h>

// Counts how many QUEX_TKN_NEWLINE tokens occur in `tokens_between_code`.
// Used to decide how much vertical spacing to reproduce between code tokens.
// NOTE(review): this file is written in the indentation-based "pythonized"
// C++ dialect that the depythonize tool consumes (no braces/semicolons);
// line and indent structure are significant, so only comments are added.
int count_new_lines(std::vector<quex::Token>& tokens_between_code)
  quex::Token token
  int count = 0;
  for(unsigned int i=0; i < tokens_between_code.size(); ++i)
    token = tokens_between_code[i]
    if( token.type_id() == QUEX_TKN_NEWLINE)
      ++count
  return count

// Writes the text of every token in `tokens_between_code` to `osstream`,
// with two exceptions: QUEX_TKN_BACKSLASH tokens are dropped entirely
// (line-continuation backslashes are not reproduced), and QUEX_TKN_NEWLINE
// tokens are emitted as std::endl instead of their raw text.
void write_out(std::ostringstream & osstream, std::vector<quex::Token>& tokens_between_code)
  quex::Token token
  for(unsigned int i=0; i < tokens_between_code.size(); ++i)
    token = tokens_between_code[i]
    if( token.type_id() != QUEX_TKN_BACKSLASH)
      if( token.type_id() == QUEX_TKN_NEWLINE)
        osstream << std::endl
      else
        osstream << token.get_text().c_str()

// Advances the lexer until the next "significant" token.
//
// On return, `token` holds either the first compilable/indent-event token
// encountered (the explicit list in the else-if below) or, if the stream
// ended first, QUEX_TKN_TERMINATION.  All ignorable tokens seen on the way
// (whitespace, comments, backslashes, multiline-macro continuations,
// newlines) are collected — in order — into `tokens_between_code`, which is
// cleared first.
//
// NOTE(review): a token matching neither list is silently skipped (neither
// stored nor returned) — presumably intentional for event tokens, see the
// comment below about QUEX_TKN_EVENT_MODE_CHANGE; confirm against grammar.
void get_token(quex::Token& token, quex::depythonize_c_lex& qlex, std::vector<quex::Token>& tokens_between_code)
  qlex.receive(&token)
  tokens_between_code.clear()
  while(token.type_id() != QUEX_TKN_TERMINATION)
    // Leave out QUEX_TKN_EVENT_MODE_CHANGE and QUEX_TKN_ERROR_MISALIGNED_INDENTATION 
    // by now
    if(  // Ignorable tokens
      token.type_id() == QUEX_TKN_WHITESPACE ||
      token.type_id() == QUEX_TKN_COMMENT ||
      token.type_id() == QUEX_TKN_BACKSLASH ||
      //token.type_id() == QUEX_TKN_PREPROCESSOR_SINGLE_LINE ||
      token.type_id() == QUEX_TKN_PREPROCESSOR_MULTILINE_CONTINUE ||
      token.type_id() == QUEX_TKN_NEWLINE  //||
    )
      tokens_between_code.push_back(token)
    else if(
       //Compilable tokens and indents events
      token.type_id() == QUEX_TKN_CODE ||
      token.type_id() == QUEX_TKN_WORD ||
      token.type_id() == QUEX_TKN_NUMBER ||
      token.type_id() == QUEX_TKN_STRING ||
      token.type_id() == QUEX_TKN_SEMICOLON ||
      token.type_id() == QUEX_TKN_START_PARENTHESIS_BLOCK ||
      token.type_id() == QUEX_TKN_END_PARENTHESIS_BLOCK ||
      token.type_id() == QUEX_TKN_PREPROCESSOR_MULTILINE_START ||
      token.type_id() == QUEX_TKN_PREPROCESSOR_MULTILINE_END ||
       //Indent events
      token.type_id() == QUEX_TKN_BLOCK_OPEN ||
      token.type_id() == QUEX_TKN_BLOCK_CLOSE   //||
    )
      return
    qlex.receive(&token)

// Returns true if any token in `history` has type id `TYPE_ID`.
// Linear scan over the buffered tokens; `history` is not modified.
bool is_in_buffer(std::vector<quex::Token>& history, unsigned int TYPE_ID)
  quex::Token  Token
  int size = history.size()
  
  for(int i=0; i < size; ++i)
    Token = history[i]
    if(Token.type_id() == TYPE_ID)
      return true
    
  return false

// Emits one "}" to `osstream` for each consecutive QUEX_TKN_BLOCK_CLOSE
// starting at `token`, advancing the (token, tokenNext) lookahead pair via
// get_token().  The non-compilable tokens collected before each consumed
// token are buffered in `tokens_before_token_set` and written out only after
// all the closing braces — so that, e.g., the noncompilable tokens before a
// QUEX_TKN_PREPROCESSOR_MULTILINE_END are printed after the last "}".
// On return, `token`/`tokens_before_token` and `tokenNext`/
// `tokens_before_tokenNext` refer to the first non-BLOCK_CLOSE position.
void print_many_block_close(  std::ostringstream & osstream
                  , quex::depythonize_c_lex & qlex
                  , quex::Token & token
                  , quex::Token & tokenNext
                  , std::vector<quex::Token> & tokens_before_token
                  , std::vector<quex::Token> & tokens_before_tokenNext
                  , bool preprocessor_multiline_end
                  )
  //Write the non compilable tokens
  //FIX(review): previously initialized with `new std::vector<quex::Token>()`,
  //an allocation that was always overwritten (or never freed) before use and
  //therefore leaked on every call.  Start as NULL instead; every vector that
  //is actually allocated below is pushed into the set and deleted at the end.
  std::vector <quex::Token>* tokens_before_tokenTemp = NULL
  std::vector <std::vector<quex::Token>* > tokens_before_token_set

  //need to print noncompilable tokens before QUEX_TKN_PREPROCESSOR_MULTILINE_END after the QUEX_TKN_BLOCK_CLOSE
  if(preprocessor_multiline_end)
    tokens_before_tokenTemp = new  std::vector <quex::Token>()
    tokens_before_tokenTemp->swap(tokens_before_token) //to print later
    tokens_before_token_set.push_back(tokens_before_tokenTemp)
    
  //Step forward one token
  token = tokenNext
  tokens_before_token.swap(tokens_before_tokenNext)
  get_token(tokenNext,qlex,tokens_before_tokenNext)

  //Need to close all the braces before the Macro ending
  while( //tokenNext.type_id() != QUEX_TKN_TERMINATION &&
  token.type_id() == QUEX_TKN_BLOCK_CLOSE
  )
    //Add to the set
    tokens_before_tokenTemp = new  std::vector <quex::Token>()
    tokens_before_tokenTemp->swap(tokens_before_token) //to print later
    tokens_before_token_set.push_back(tokens_before_tokenTemp)

    //print the }
    osstream << "}"

    //Step forward one token
    token = tokenNext
    tokens_before_token.swap(tokens_before_tokenNext)
    get_token(tokenNext,qlex,tokens_before_tokenNext)

  //osstream << "2"
  //Flush the deferred noncompilable tokens, in the order they were buffered,
  //and release each vector allocated above.
  for(uint32_t i=0; i< tokens_before_token_set.size() ;++i)
    tokens_before_tokenTemp = tokens_before_token_set[i]
    write_out(osstream, *tokens_before_tokenTemp)
    delete tokens_before_tokenTemp
  return

//--helper--
