#include "PreCompile.hpp"
#include "Tokenizer.hpp"

namespace CPreProc
{
  // Describes one kind of preprocessing token.  Concrete subclasses
  // (PunctuatorDef, StringLiteralDef, IdentifierTokenDef, ...) implement
  // parse() to consume the token's characters from a Tokenizer and build
  // the resulting Token value.  Instances live in
  // Tokenizer::pp_token_def_table (created by the init_* functions,
  // deleted by Tokenizer::shutdown) and are chained per first-character
  // bucket through ll_next.
  class TokenDef
  {
  public:
    virtual ~TokenDef() { }

    // Consumes this token starting at the tokenizer's current position
    // and returns the parsed Token.  Only called after find_token_def()
    // has already matched the leading characters in `str`.
    virtual Token   parse(Tokenizer &tokenizer) = 0;

    TokenDef*       ll_next;     // next definition in the same first-char bucket

    const gchar*    str;         // leading characters that select this definition
    unsigned char   str_len;     // length of str (spellings must be < 256 chars)
    TokenType       token_type;
  };

  // Creates an empty tokenizer with no input range attached.
  // Tokenizer::init() must have been called beforehand.
  Tokenizer::Tokenizer()
  {
    assert(inited);

    cur_char = 0;
    end_char = 0;
  }

  // Creates a tokenizer over the half-open character range [begin, end).
  // Tokenizer::init() must have been called beforehand.
  Tokenizer::Tokenizer(const gchar *begin,const gchar *end)
  {
    assert(inited);
    assert(!(end < begin));

    cur_char = begin;
    end_char = end;
  }

  // Creates a tokenizer over a NUL-terminated string; scans once to
  // locate its end.  Tokenizer::init() must have been called beforehand.
  Tokenizer::Tokenizer(const gchar *null_terminated)
  {
    assert(inited);

    cur_char = null_terminated;
    end_char = null_terminated;
    for(;*end_char != 0;++end_char)
      ;
  }

  bool Tokenizer::compare_advance_on_equal(const gchar *b)
  {
    const gchar *a = cur_char;
    while(a != end_char)
    {
      if(*a != *b)
        break;
      a++;
      b++;
    }

    if(*b == 0) // b must be at it's end
    {
      cur_char = a;
      return true;
    }
    else
      return false;
  }

  // Decodes the UTF-8 character at the current position without
  // advancing.  Throws TokenizerInvalidUTF8Exception on a malformed or
  // truncated sequence.
  gunichar Tokenizer::cur_utf8() const
  {
    assert(cur_char < end_char);

    if((guint8)*cur_char < 0x80)
      return *cur_char;   // plain ASCII byte, no decoding needed
    else
    {
      const char *cur_byte = cur_char;

      int mask,len;
      utf8_compute(*cur_byte,mask,len);
      if(len == -1)
        throw TokenizerInvalidUTF8Exception();

      gunichar res = *cur_byte & mask;
      cur_byte++;

      for(int i = 1;i < len;i++)
      {
        // BUGFIX: the continuation bytes must be checked and read through
        // cur_byte; the original tested and read *cur_char (the first
        // byte, which never advances) here, so multi-byte characters were
        // decoded from the wrong bytes and truncation was never detected.
        if(cur_byte == end_char)
          throw TokenizerInvalidUTF8Exception();
        if((*cur_byte & 0xc0) != 0x80)   // continuation bytes are 10xxxxxx
          throw TokenizerInvalidUTF8Exception();

        res <<= 6;
        res |= *cur_byte & 0x3f;
        cur_byte++;
      }

      return res;
    }
  }

  // Classifies the first byte of a UTF-8 sequence: len receives the total
  // sequence length (1..6, following the original pre-RFC3629 encoding)
  // and mask selects the payload bits of the initial byte.  For an
  // invalid initial byte (a continuation byte 10xxxxxx), len is set to -1.
  void Tokenizer::utf8_compute(gchar initial_char, int &mask, int &len)
  {
    if ((guint8)initial_char < 128)
    {
      len = 1;
      mask = 0x7f;
    }
    else if ((initial_char & 0xe0) == 0xc0)
    {
      len = 2;
      mask = 0x1f;
    }
    else if ((initial_char & 0xf0) == 0xe0)
    {
      len = 3;
      mask = 0x0f;
    }
    else if ((initial_char & 0xf8) == 0xf0)
    {
      len = 4;
      mask = 0x07;
    }
    else if ((initial_char & 0xfc) == 0xf8)
    {
      len = 5;
      mask = 0x03;
    }
    else if ((initial_char & 0xfe) == 0xfc)
    {
      len = 6;
      mask = 0x01;
    }
    else
    {
      len = -1;
      // BUGFIX: also clear mask so callers can never read an
      // uninitialized out-parameter on the invalid-byte path.
      mask = 0;
    }
  }

  // Advances over one UTF-8 character: when several bytes form a single
  // character, all of them are skipped at once.  Plain advance() is fine
  // wherever any extended character would be an error anyway; the two
  // may be mixed freely.  Throws TokenizerInvalidUTF8Exception when the
  // sequence would run past the end of the input.
  void Tokenizer::advance_utf8()
  {
    assert(cur_char < end_char);
    cur_char = g_utf8_next_char(cur_char);
    if(!(cur_char <= end_char))
      throw TokenizerInvalidUTF8Exception();
  }

  bool Tokenizer::skip_optional_whitespace()
  {
    bool whitespace_found = cur_char != end_char &&
      is_whitespace(*cur_char);

    while(cur_char != end_char)
    {
      if(!is_whitespace(*cur_char))
      {
        break;
      }
      cur_char++;
    }

    return whitespace_found;
  }

  // Requires at least one whitespace character at the current position
  // and skips the whole run; throws TokenizerException otherwise.
  void Tokenizer::skip_required_whitespace()
  {
    assert(cur_char < end_char);

    if(is_whitespace(*cur_char))
    {
      cur_char++;
      skip_optional_whitespace();
      return;
    }

    throw TokenizerException("Unexpected token. Whitespace expected.");
  }

  // Reads one preprocessing token.  When a registered token definition
  // matches, it parses the token; any other single character becomes a
  // TokenType_Misc token on its own.
  Token Tokenizer::read_pp_token()
  {
    assert(cur_char < end_char);

    if(TokenDef *def = find_token_def())
      return def->parse(*this);

    // no definition matched: emit the single character as a misc token
    Token ret;
    ret.begin = cur_char;
    cur_char++;
    ret.end = cur_char;
    ret.token_id = 0;
    ret.type = TokenType_Misc;
    return ret;
  }

  // Reads one token and requires it to be an identifier; throws
  // TokenizerException for any other token kind.
  Token Tokenizer::read_identifier()
  {
    const Token token = read_pp_token();
    if(token.type == TokenType_Identifier)
      return token;
    throw TokenizerException("Unexpected token. Identifier expected.");
  }

  // Consumes the whole remaining input as one TokenType_Misc token.
  Token Tokenizer::read_to_end()
  {
    assert(cur_char < end_char);

    Token ret;
    ret.begin = cur_char;
    ret.end = end_char;
    ret.type = TokenType_Misc;
    // BUGFIX: token_id was left uninitialized here, unlike every other
    // token-producing path in this file.
    ret.token_id = 0;

    cur_char = end_char;

    return ret;
  }

  // Finds the TokenDef matching the input at cur_char, or returns NULL
  // when no registered token starts here.
  //
  // pp_token_def_table is indexed by the token's first byte; each bucket
  // is a linked list kept in reverse-alphabetic order by add_token_def,
  // so longer spellings ("<<=") are tried before their prefixes ("<<").
  //
  // NOTE(review): when a comparison fails and we move to ll_next,
  // char_index is deliberately NOT reset -- this relies on the bucket's
  // sort order guaranteeing the next candidate agrees with the current
  // one on all characters before char_index.  Verify before changing the
  // sort order.
  TokenDef* Tokenizer::find_token_def()
  {
    assert(cur_char < end_char);
    assert(inited);

    // check the tokens in the token_def_table
    // BUGFIX: index with the unsigned byte value; gchar may be signed,
    // and a byte >= 0x80 would have produced a negative array index.
    TokenDef *token_def = pp_token_def_table[(guint8)*cur_char];
    int char_index = 1;
    while(token_def &&
      char_index < token_def->str_len &&
      cur_char + char_index != end_char)
    {
      if(token_def->str[char_index] == cur_char[char_index])
        char_index++;
      else
        token_def = token_def->ll_next;
    }

    // only a match over the definition's full length counts
    if(token_def && char_index == token_def->str_len)
      return token_def;
    else
      return NULL;
  }

  // Attempts to read a header name token (<...> or "...").  On success,
  // fills `out` with the name between the delimiters (delimiters
  // excluded), advances past the closing delimiter and returns true.
  // Returns false -- leaving the position untouched -- when the input
  // does not start with a delimiter or the name is unterminated.
  bool Tokenizer::try_read_header_name(Token &out)
  {
    assert(cur_char < end_char);

    const char *read_ptr = cur_char;

    char close_char;
    if(*read_ptr == '<')
      close_char = '>';
    else if(*read_ptr == '"')
      close_char = '"';
    else
      return false;

    read_ptr++;
    while(1)
    {
      // BUGFIX: bounds check before the dereference; the original read
      // *read_ptr first, reading one byte past end_char when the header
      // name was unterminated.
      if(read_ptr == end_char)
        return false;

      if(*read_ptr == close_char)
      {
        read_ptr++;
        break;
      }

      read_ptr++;
    }

    out.begin = cur_char + 1;        // skip the opening delimiter
    out.end = read_ptr - 1;          // exclude the closing delimiter
    out.type = TokenType_HeaderName;
    if(close_char == '"')
      out.token_id = HeaderNameType_Quoted;
    else
      out.token_id = HeaderNameType_AngularBrackets;

    cur_char = read_ptr;
    return true;
  }

  ///////////////////
  // Token definitions stuff

  // First-byte dispatch table: bucket i holds the linked list of token
  // definitions whose spelling starts with byte i (see add_token_def).
  TokenDef* Tokenizer::pp_token_def_table[256];
  // set by init(), cleared by shutdown(); guards against use before init
  bool Tokenizer::inited = false;

  // Builds all token definition tables.  Must be called exactly once
  // before any Tokenizer is constructed; on failure everything already
  // allocated is torn down again and the exception is re-thrown.
  void Tokenizer::init()
  {
    assert(!inited);  // you shouldn't init it twice

    inited = true;    // set first so shutdown() runs in the catch below
    try
    {
      init_punctuator_token_defs();
      init_string_literal_token_defs();
      init_identifier_token_defs();
      init_pp_number_token_defs();
      init_whitespace_defs();
    }
    catch(...)
    {
      shutdown();
      throw;
    }
  }

  // Frees every registered TokenDef and returns the tokenizer machinery
  // to its uninitialized state.  Safe to call when not initialized.
  void Tokenizer::shutdown()
  {
    if(!inited)
      return;

    for(int i = 0;i < 256;i++)
    {
      TokenDef *token_def = pp_token_def_table[i];
      // BUGFIX: clear the bucket so a later init() doesn't walk (and
      // chain new defs onto) dangling pointers to the freed list.
      pp_token_def_table[i] = 0;
      while(token_def)
      {
        TokenDef *next = token_def->ll_next;
        delete token_def;
        token_def = next;
      }
    }

    inited = false;
  }

  // Inserts def into the bucket of its first character, keeping that
  // bucket's linked list sorted in reverse-alphabetic order.  The order
  // matters: it makes find_token_def try longer spellings before their
  // prefixes ("<<=" before "<<" before "<").
  void Tokenizer::add_token_def(TokenDef *def)
  {
    // insert it in the bucket for its first char, and then sort
    // the linked list of that bucket in a reverse alphabetic order
    int bucket = (guint8)def->str[0];

    TokenDef *after_this = 0;
    TokenDef *before_this = pp_token_def_table[bucket];
    while(before_this)
    {
      // BUGFIX: strcmp only guarantees a negative value for "less than",
      // not exactly -1; the original `!= -1` test broke the sort order on
      // implementations returning other negative values.
      if(strcmp(def->str,before_this->str) >= 0)
        break;

      after_this = before_this;
      before_this = before_this->ll_next;
    }

    if(after_this)
      after_this->ll_next = def;
    else
      pp_token_def_table[bucket] = def;
    def->ll_next = before_this;
  }

  // Token definition for a fixed punctuator spelling such as "<<=" or ";".
  class PunctuatorDef : public TokenDef
  {
  public:
    PunctuatorDef(const gchar *str,Punctuator punctuator)
    {
      token_type = TokenType_Punctuator;
      this->punctuator = punctuator;

      this->str = str;
      str_len = (unsigned char)strlen(str);
    }

    // The matched spelling is fully known up front, so parsing just
    // skips str_len characters and records the span.
    Token parse(Tokenizer &tokenizer)
    {
      Token result;
      result.begin = tokenizer.cur_char;
      result.end = result.begin + str_len;
      tokenizer.cur_char = result.end;
      result.type = TokenType_Punctuator;
      result.token_id = punctuator;
      return result;
    }

    Punctuator punctuator;
  };

  // Registers one PunctuatorDef per C punctuator spelling, including the
  // digraph alternatives (<: :> <% %> %: %:%:), which map to the same
  // Punctuator ids as their primary spellings.
  void Tokenizer::init_punctuator_token_defs()
  {
    PunctuatorDef *punctuator_defs[] =
    {
      new PunctuatorDef("[",       Punct_LeftSquareBracket),
      new PunctuatorDef("]",       Punct_RightSquareBracket),
      new PunctuatorDef("(",       Punct_LeftParenthesis),
      new PunctuatorDef(")",       Punct_RightParenthesis),
      new PunctuatorDef("{",       Punct_LeftCurlyBracket),
      new PunctuatorDef("}",       Punct_RightCurlyBracket),
      new PunctuatorDef(".",       Punct_Dot),
      new PunctuatorDef("->",      Punct_PtrMember),
      new PunctuatorDef("++",      Punct_Inc),
      new PunctuatorDef("--",      Punct_Dec),
      new PunctuatorDef("&",       Punct_Amp),
      new PunctuatorDef("*",       Punct_Star),
      new PunctuatorDef("+",       Punct_Plus),
      new PunctuatorDef("-",       Punct_Minus),
      new PunctuatorDef("~",       Punct_Invert),
      new PunctuatorDef("!",       Punct_Not),
      new PunctuatorDef("/",       Punct_Div),
      new PunctuatorDef("%",       Punct_Modulo),
      new PunctuatorDef("<<",      Punct_ShiftLeft),
      new PunctuatorDef(">>",      Punct_ShiftRight),
      new PunctuatorDef("<",       Punct_LessThan),
      new PunctuatorDef(">",       Punct_GreaterThan),
      new PunctuatorDef("<=",      Punct_LessOrEqual),
      new PunctuatorDef(">=",      Punct_GreaterOrEqual),
      new PunctuatorDef("==",      Punct_Equal),
      new PunctuatorDef("!=",      Punct_Unequal),
      new PunctuatorDef("^",       Punct_BitXor),
      new PunctuatorDef("|",       Punct_BitOr),
      new PunctuatorDef("&&",      Punct_LogAnd),
      new PunctuatorDef("||",      Punct_LogOr),
      new PunctuatorDef("?",       Punct_QuestionMark),
      new PunctuatorDef(":",       Punct_Colon),
      new PunctuatorDef(";",       Punct_SemiColon),
      new PunctuatorDef("...",     Punct_ThreeDots),
      new PunctuatorDef("=",       Punct_Assign),
      new PunctuatorDef("*=",      Punct_MulAndAssign),
      new PunctuatorDef("/=",      Punct_DivAndAssign),
      new PunctuatorDef("%=",      Punct_ModAndAssign),
      new PunctuatorDef("+=",      Punct_AddAndAssign),
      new PunctuatorDef("-=",      Punct_SubAndAssign),
      new PunctuatorDef("<<=",     Punct_ShiftLeftAndAssign),
      new PunctuatorDef(">>=",     Punct_ShiftRightAndAssign),
      new PunctuatorDef("&=",      Punct_BitAndAndAssign),
      new PunctuatorDef("^=",      Punct_BitXorAndAssign),
      new PunctuatorDef("|=",      Punct_BitOrAndAssign),
      new PunctuatorDef(",",       Punct_Comma),
      new PunctuatorDef("#",       Punct_Sharp),
      new PunctuatorDef("##",      Punct_DoubleSharp),
      // digraphs
      new PunctuatorDef("<:",      Punct_LeftSquareBracket),
      new PunctuatorDef(":>",      Punct_RightSquareBracket),
      new PunctuatorDef("<%",      Punct_LeftCurlyBracket),
      new PunctuatorDef("%>",      Punct_RightCurlyBracket),
      new PunctuatorDef("%:",      Punct_Sharp),
      new PunctuatorDef("%:%:",    Punct_DoubleSharp),
    };

    // add_token_def sorts each bucket itself, so the table order above
    // does not matter for lookup
    for(int i = 0;i < (int)(sizeof(punctuator_defs) / sizeof(punctuator_defs[0]));i++)
    {
      add_token_def(punctuator_defs[i]);
    }
  }

  // Token definition for character and wide string literals.  The match
  // prefix (str) is `"` or `L"`; parse() then scans to the closing
  // quote, honoring backslash escapes.  Throws TokenizerException on an
  // unterminated literal (the input is one logical line, hence the
  // "Newline in constant." wording).
  class StringLiteralDef : public TokenDef
  {
  public:
    virtual Token parse(CPreProc::Tokenizer &tokenizer)
    {
      Token ret;
      ret.begin = tokenizer.cur_char;

      tokenizer.cur_char += str_len;    // skip the " or the L"

      while(tokenizer.cur_char != tokenizer.end_char)
      {
        if(*tokenizer.cur_char == '\"')
          break;
        else if(*tokenizer.cur_char == '\\')
        {
          // skip the escaped character too, so \" does not close the literal
          tokenizer.cur_char++;
          if(tokenizer.cur_char == tokenizer.end_char)
            throw TokenizerException("Newline in constant.");
        }

        tokenizer.cur_char++;
      }
      // BUGFIX: check for end-of-input before dereferencing cur_char; the
      // original read *cur_char even when the loop had stopped at
      // end_char, reading one byte past the end of the input.
      if(tokenizer.cur_char == tokenizer.end_char ||
        *tokenizer.cur_char != '\"')
        throw TokenizerException("Newline in constant.");

      tokenizer.cur_char++;   // consume the closing quote

      ret.end = tokenizer.cur_char;

      ret.type = TokenType_StringLiteral;
      ret.token_id = string_literal_type;
      return ret;
    }

    StringLiteralType string_literal_type;
  };

  void Tokenizer::init_string_literal_token_defs()
  {
    StringLiteralDef *token_def = new StringLiteralDef;
    token_def->str = "\"";
    token_def->str_len = (int)strlen(token_def->str);
    token_def->token_type = TokenType_StringLiteral;
    token_def->string_literal_type = StringLiteralType_CharString;
    add_token_def(token_def);

    token_def = new StringLiteralDef;
    token_def->str = "L\"";
    token_def->str_len = (int)strlen(token_def->str);
    token_def->token_type = TokenType_StringLiteral;
    token_def->string_literal_type = StringLiteralType_WideString;
    add_token_def(token_def);
  }

  // Token definition for identifiers.  One instance is registered per
  // possible ASCII first character (letters and underscore); the tail
  // may additionally contain extended characters, which are decoded as
  // UTF-8 before being classified.
  class IdentifierTokenDef : public TokenDef
  {
  public:
    IdentifierTokenDef()
    {
      token_type = TokenType_Identifier;
    }

    virtual Token parse(Tokenizer &tokenizer)
    {
      assert(tokenizer.cur_char < tokenizer.end_char);

      Token result;
      result.begin = tokenizer.cur_char;

      if(!is_identifier_first_char(*tokenizer.cur_char))
        throw TokenizerException("Unexpected token. Identifier expected.");
      tokenizer.cur_char++;

      // consume the identifier tail
      while(tokenizer.cur_char < tokenizer.end_char)
      {
        const bool is_ascii = (guint8)*tokenizer.cur_char < 0x80;
        if(is_ascii)
        {
          if(!is_identifier_char(*tokenizer.cur_char))
            break;
          tokenizer.cur_char++;
        }
        else
        {
          // extended character: decode it first, then skip all its bytes
          if(!is_identifier_char(tokenizer.cur_utf8()))
            break;
          tokenizer.advance_utf8();
        }
      }

      result.end = tokenizer.cur_char;
      result.type = TokenType_Identifier;
      result.token_id = 0;

      return result;
    }
  };

  // Registers an IdentifierTokenDef for each character an identifier may
  // start with (ASCII letters and underscore).
  void Tokenizer::init_identifier_token_defs()
  {
    const char first_chars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_";
    // backing storage for the one-character match strings; static so it
    // outlives the token defs that point into it
    static char reserved_strings[sizeof(first_chars)][2];

    int slot = 0;
    for(const char *c = first_chars;*c != 0;c++,slot++)
    {
      char *str = reserved_strings[slot];
      str[0] = *c;
      str[1] = 0;

      IdentifierTokenDef *token_def = new IdentifierTokenDef();
      token_def->str = str;
      token_def->str_len = 1;
      add_token_def(token_def);
    }
  }

  // Token definition for preprocessing numbers (C pp-number grammar):
  // after the matched start (a digit, or '.' followed by a digit) the
  // token continues through identifier characters and dots, and an
  // exponent marker e/E/p/P may be followed by a sign.
  class PPNumberTokenDef : public TokenDef
  {
  public:
    virtual Token parse(Tokenizer &tokenizer)
    {
      assert(tokenizer.cur_char < tokenizer.end_char);
      assert(is_digit(*tokenizer.cur_char) || *tokenizer.cur_char == '.');

      Token result;
      result.begin = tokenizer.cur_char;

      tokenizer.cur_char += str_len;   // skip the already-matched start
      while(tokenizer.cur_char != tokenizer.end_char)
      {
        const gchar c = *tokenizer.cur_char;

        if(c == 'e' || c == 'E' || c == 'p' || c == 'P')
        {
          // exponent marker: an immediately following sign belongs to
          // the pp-number too
          tokenizer.cur_char++;
          if(tokenizer.cur_char != tokenizer.end_char &&
            (*tokenizer.cur_char == '+' || *tokenizer.cur_char == '-'))
          {
            tokenizer.cur_char++;
          }
        }
        else if(is_identifier_char(c) || c == '.')
        {
          // covers decimal digits, hex digits, the x in 0xff, suffixes, ...
          tokenizer.cur_char++;
        }
        else
          break;
      }

      result.end = tokenizer.cur_char;
      result.type = TokenType_PPNumber;
      result.token_id = 0;

      return result;
    }
  };

  // Registers the pp-number start sequences: each digit "d" and each
  // dot-digit pair ".d".
  void Tokenizer::init_pp_number_token_defs()
  {
    // storage for ten one-char and ten two-char match strings; static so
    // it outlives the token defs that point into it
    static char reserved_strings[20][3];
    int slot = 0;

    for(char digit = '0';digit <= '9';digit++)
    {
      // a pp-number starting directly with a digit
      char *str = reserved_strings[slot++];
      str[0] = digit;
      str[1] = 0;

      PPNumberTokenDef *token_def = new PPNumberTokenDef();
      token_def->str = str;
      token_def->str_len = 1;
      add_token_def(token_def);

      // a pp-number starting with '.' followed by a digit
      str = reserved_strings[slot++];
      str[0] = '.';
      str[1] = digit;
      str[2] = 0;

      token_def = new PPNumberTokenDef();
      token_def->str = str;
      token_def->str_len = 2;
      add_token_def(token_def);
    }
  }

  // Token definition for whitespace runs: a single matched space or tab
  // expands to the full run of consecutive whitespace.
  class WhitespaceTokenDef : public TokenDef
  {
  public:
    virtual Token parse(Tokenizer &tokenizer)
    {
      Token result;
      result.begin = tokenizer.cur_char;
      tokenizer.skip_optional_whitespace();
      result.end = tokenizer.cur_char;
      result.token_id = 0;
      result.type = TokenType_WhiteSpace;
      return result;
    }
  };

  void Tokenizer::init_whitespace_defs()
  {
    static gchar space_str[] = " ";
    static gchar tab_str[] = "\t";

    WhitespaceTokenDef *token_def = new WhitespaceTokenDef;
    token_def->str = space_str;
    token_def->str_len = 1;
    add_token_def(token_def);

    token_def = new WhitespaceTokenDef;
    token_def->str = tab_str;
    token_def->str_len = 1;
    add_token_def(token_def);
  }

  /*

  static KeywordDef keyword_defs[] =
  {
    {"auto",      Keyword_Auto},
    {"break",     Keyword_Break},
    {"case",      Keyword_Case},
    {"char",      Keyword_Char},
    {"const",     Keyword_Const},
    {"continue",  Keyword_Continue},
    {"default",   Keyword_Default},
    {"do",        Keyword_Do},
    {"double",    Keyword_Double},
    {"else",      Keyword_Else},
    {"enum",      Keyword_Enum},
    {"extern",    Keyword_Extern},
    {"float",     Keyword_Float},
    {"for",       Keyword_For},
    {"goto",      Keyword_Goto},
    {"if",        Keyword_If},
    {"inline",    Keyword_Inline},
    {"int",       Keyword_Int},
    {"long",      Keyword_Long},
    {"register",  Keyword_Register},
    {"restrict",  Keyword_Restrict},
    {"return",    Keyword_Return},
    {"short",     Keyword_Short},
    {"signed",    Keyword_Signed},
    {"sizeof",    Keyword_SizeOf},
    {"static",    Keyword_Static},
    {"struct",    Keyword_Struct},
    {"switch",    Keyword_switch},
    {"typedef",   Keyword_TypeDef},
    {"union",     Keyword_Union},
    {"unsigned",  Keyword_Unsigned},
    {"void",      Keyword_Void},
    {"volatile",  Keyword_Volatile},
    {"while",     Keyword_While},
    {"_Bool",     Keyword_Bool},
    {"_Complex",  Keyword_Complex},
    {"_Imaginary",Keyword_Imaginary},
  };*/

  //////////
  // Token

  // Shared constant tokens used when the preprocessor has to synthesize
  // output (e.g. macro expansion, #if evaluation, placemarkers).
  // NOTE(review): token_id is a guint16, so the -1 passed for the two
  // pp-numbers wraps to 65535 -- presumably a deliberate "no id" marker;
  // confirm against the consumers of token_id.
  const Token Token::zero_pp_number = create_constant_token("0",TokenType_PPNumber,-1);
  const Token Token::one_pp_number = create_constant_token("1",TokenType_PPNumber,-1);
  const Token Token::single_space_whitespace = create_constant_token(" ",TokenType_WhiteSpace,0);
  const Token Token::placemaker_pp_token = create_constant_token("",TokenType_PlacemakerPPToken,0);

  // Builds a Token spanning the given NUL-terminated string literal.
  // The string must outlive the token (callers pass string literals).
  Token Token::create_constant_token(const char *str,guint16 type,guint16 token_id)
  {
    Token token;
    token.begin = str;
    token.end = str + strlen(str);
    token.type = type;
    token.token_id = token_id;
    return token;
  }

  // Returns the token's text as a Glib::ustring.
  Glib::ustring Token::to_string() const
  {
    // Glib::ustring stores UTF-8 bytes directly, so the token's byte
    // range can be copied with the (src, n) constructor in one call
    // instead of appending character by character.
    return Glib::ustring(begin,(Glib::ustring::size_type)(end - begin));
  }

  // Compares the token's text against a NUL-terminated string.
  // True only when both have exactly the same characters.
  bool Token::operator == (const gchar *b) const
  {
    //assert(token_type_needs_string(get_token_type()));
    assert(begin && end);

    const gchar *a = begin;
    while(a != end)
    {
      assert(*a != 0);

      if(*a != *b)
        return false;   // also hit when b ends early (*b == 0 here)

      a++;
      b++;
    }

    // equal only if b is exhausted at the same time as the token
    return *b == 0;
  }

  // Compares the token's text against a ustring by delegating to the
  // C-string comparison above.
  bool Token::operator == (const Glib::ustring &b) const
  {
    return operator == (b.c_str());
  }

  ///////
  // Utility functions
  bool validate_identifier(const gchar *begin,const gchar *end)
  {
    if(!end)
      end = begin + strlen(begin);

    assert(g_utf8_validate(begin,(gssize)(end - begin),0));

    if(begin == end)
      return false;

    const gchar *c = begin;

    if(!is_identifier_first_char(g_utf8_get_char(c)))
      return false;

    c = g_utf8_next_char(c);

    for(;c != end;c = g_utf8_next_char(c))
    {
      if(!is_identifier_char(g_utf8_get_char(c)))
        return false;
    }

    return true;
  }
}

