
#include <lexer.h>

#include <regex>
#include <unordered_map>

// Print a diagnostic and abort. Only usable inside Lexer member functions:
// it reads the `_line` member directly.
// Fix: the old version discarded __VA_ARGS__ entirely, so the message each
// call site passed (e.g. "unknown char", __FUNCTION__) was never printed.
// Print it after the line-number prefix. (__FUNCTION__ is not a string
// literal on all compilers, so it is forwarded as a format argument rather
// than concatenated.)
#define panic(...)                                       \
  do {                                                   \
    fprintf(stderr, "some error near line: %d: ", _line); \
    fprintf(stderr, __VA_ARGS__);                        \
    fputc('\n', stderr);                                 \
    abort();                                             \
  } while (0)

// Matches a maximal run of word characters ([A-Za-z0-9_]) at the start of
// the remaining input; used for identifiers and keywords after the first
// character has been checked to be a letter or '_'.
const std::regex reIdentifier("^[\\w]+");
// Matches a hex literal (0x..., optional hex fraction, optional p/P binary
// exponent) or a decimal literal (optional fraction, optional e/E exponent)
// at the start of the remaining input.
// NOTE(review): every sub-part of the decimal alternative is optional, so
// this regex can produce a zero-length match; scan() treats a zero-length
// match as "no match", and callers only invoke it when the current
// character is a digit or '.'.
const std::regex reNumber(
    "^0[xX][0-9a-fA-F]*(\\.[0-9a-fA-F]*)?([pP][+\\-]?[0-9]+)?|^[0-9]*(\\.[0-9]*"
    ")?([eE][+\\-]?[0-9]+)?");

// Reserved words -> token kinds. Scanned identifiers are looked up here
// (see do_get_token) before being classified as plain identifiers.
// NOTE(review): the mapped type is `const TokenType`, which makes
// operator[] on this map dubious (operator[] must default-construct the
// mapped value on a miss); prefer find() for lookups.
std::unordered_map<std::string, const TokenType> keywords = {
    {"register", kTokenRegister},
    {"func", kTokenFunc},
    {"var", kTokenVar},
    {"auto", kTokenAuto},
    {"void", kTokenVoid},
    {"bool", kTokenBool},
    {"char", kTokenChar},
    {"short", kTokenShort},
    {"int", kTokenInt},
    {"long", kTokenLong},
    {"float", kTokenFloat},
    {"double", kTokenDouble},
    {"signed", kTokenSigned},
    {"unsigned", kTokenUnsigned},
    {"true", kTokenTrue},
    {"false", kTokenFalse},
    {"enum", kTokenEnum},
    {"struct", kTokenStruct},
    {"union", kTokenUnion},
    {"extern", kTokenExtern},
    {"static", kTokenStatic},
    {"const", kTokenConst},
    {"sizeof", kTokenSizeof},
    {"typedef", kTokenTypedef},
    {"for", kTokenFor},
    {"do", kTokenDo},
    {"while", kTokenWhile},
    {"break", kTokenBreak},
    {"continue", kTokenContinue},
    {"if", kTokenIf},
    {"else", kTokenElse},
    {"switch", kTokenSwitch},
    {"case", kTokenCase},
    {"default", kTokenDefault},
    {"return", kTokenReturn},
    {"goto", kTokenGoto},
    {"volatile", kTokenVolatile},
};

// True iff `c` is an ASCII whitespace character (space, tab, or one of
// the \n \v \f \r control characters).
bool is_white_space(char c) {
  return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' ||
         c == '\r';
}

// True iff `c` is a line terminator (CR or LF).
bool is_new_line(char c) {
  switch (c) {
    case '\r':
    case '\n':
      return true;
    default:
      return false;
  }
}
// True iff `c` is an ASCII letter. (Name kept as declared elsewhere.)
bool is_lattin(char c) {
  if (c >= 'a' && c <= 'z') return true;
  return c >= 'A' && c <= 'Z';
}
// True iff `c` is an ASCII decimal digit.
bool is_digit(char c) { return c >= '0' && c <= '9'; }

// Value of a hexadecimal digit (either case), or -1 when `c` is not one.
int to_hex(char c) {
  if (c >= '0' && c <= '9') return c - '0';
  if (c >= 'a' && c <= 'f') return 10 + (c - 'a');
  if (c >= 'A' && c <= 'F') return 10 + (c - 'A');
  return -1;
}

// Builds a lexer over `buf` (`size` bytes; not required to be
// NUL-terminated -- see scan(), which must bound its searches).
// Starts at line 1, column 1, with no cached lookahead token.
Lexer::Lexer(const char* buf, size_t size)
    : _chunk(buf), _left(size), _line(1), _col(1), _cached(false) {}

// Returns true iff the remaining input begins with the literal `t`.
// Never reads past the `_left` bytes still available.
bool Lexer::test(const std::string& t) const {
  if (t.size() > _left) return false;
  return memcmp(t.data(), _chunk, t.size()) == 0;
}
// Advances the cursor by `n` characters: moves the chunk pointer, shrinks
// the remaining byte count, and bumps the column. Line/column resets on
// newlines are the caller's responsibility.
void Lexer::walk(int n) {
  _col += n;
  _left -= n;
  _chunk += n;
}

// Consumes one comment. Precondition: the remaining input starts with
// "//" or "/*" (skip_white_spaces verifies this before calling).
void Lexer::skip_comment() {
  if (test("//")) {
    // Line comment: eat up to, but not including, the newline so the
    // caller's newline handling keeps _line/_col correct.
    walk(2);
    while (_left > 0 && !is_new_line(_chunk[0])) {
      walk(1);
    }
  } else {
    // Block comment: eat until "*/", maintaining line/column bookkeeping
    // for newlines inside the comment.
    walk(2);
    while (_left > 0 && !test("*/")) {
      if (test("\r\n") || test("\n\r"))
        walk(2), _line++, _col = 1;
      else if (is_new_line(_chunk[0]))
        walk(1), _line++, _col = 1;
      else
        walk(1);
    }
    // Fix: only consume the terminator when one was actually found. The
    // old code called walk(2) unconditionally, so an unterminated block
    // comment (input ending mid-comment) underflowed _left and walked the
    // chunk pointer past the buffer.
    if (_left != 0) walk(2);
  }
}

// Consumes whitespace, newlines (updating _line/_col), and comments until
// the next significant character or end of input.
void Lexer::skip_white_spaces() {
  for (;;) {
    if (_left == 0) return;
    // Comments must be checked before the generic cases so that '/' is
    // not treated as a significant character here.
    if (test("/*") || test("//")) {
      skip_comment();
      continue;
    }
    // A CRLF / LFCR pair counts as a single newline.
    if (test("\r\n") || test("\n\r")) {
      walk(2);
      _line++;
      _col = 1;
      continue;
    }
    const char head = _chunk[0];
    if (is_new_line(head)) {
      walk(1);
      _line++;
      _col = 1;
    } else if (is_white_space(head)) {
      walk(1);
    } else {
      return;  // significant character reached
    }
  }
}

// Scans and returns the next token, advancing past it. Leading whitespace
// and comments are skipped first; returns a kTokenEof token when the input
// is exhausted. Aborts via panic() on an unrecognized character.
Token Lexer::do_get_token() {
  skip_white_spaces();

  if (_left == 0) {
    return {_line, kTokenEof, "EOF"};
  }

  switch (_chunk[0]) {
    case ';':
      walk(1);
      return {_line, kTokenSemi, ";"};
    case ',':
      walk(1);
      return {_line, kTokenOpComma, ","};
    case '(':
      walk(1);
      return {_line, kTokenLeftParen, "("};
    case ')':
      walk(1);
      return {_line, kTokenRightParen, ")"};
    case '[':
      walk(1);
      return {_line, kTokenLeftBrack, "["};
    case ']':
      walk(1);
      return {_line, kTokenRightBrack, "]"};
    case '{':
      walk(1);
      return {_line, kTokenLeftCurly, "{"};
    case '}':
      walk(1);
      return {_line, kTokenRightCurly, "}"};
    case ':':
      walk(1);
      return {_line, kTokenColon, ":"};
    case '\\':
      walk(1);
      return {_line, kTokenCont, "\\"};
    case '~':
      walk(1);
      return {_line, kTokenOpBitNot, "~"};
    case '?':
      walk(1);
      return {_line, kTokenQuestion, "?"};
    case '#':
      walk(1);
      return {_line, kTokenPreprocess, "#"};
    case '%':
      if (test("%=")) {
        walk(2);
        return {_line, kTokenOpModAssign, "%="};
      }
      walk(1);
      return {_line, kTokenOpReminder, "%"};
    case '\'': {
      // Character literal: opening quote, one (possibly escaped) char,
      // closing quote.
      walk(1);
      char c = scanChar();
      assert(_left > 0 && _chunk[0] == '\'');
      walk(1);
      return {_line, kTokenLiteralChar, std::string(1, c)};
    }
    case '|':
      if (test("||")) {
        walk(2);
        return {_line, kTokenOpLogicOr, "||"};
      } else if (test("|=")) {
        walk(2);
        return {_line, kTokenOpBitOrAssign, "|="};
      } else {
        walk(1);
        return {_line, kTokenOpBitOr, "|"};
      }
    case '.':
      // A '.' followed by a digit starts a number literal; fall through
      // to the literal scanning below in that case.
      if (_left == 1 || !is_digit(_chunk[1])) {
        walk(1);
        return {_line, kTokenMemberAccess, "."};
      }
      break;
    case '+':
      if (test("++")) {
        walk(2);
        return {_line, kTokenSelfInc, "++"};
      } else if (test("+=")) {
        walk(2);
        return {_line, kTokenOpAddAssign, "+="};
      } else {
        walk(1);
        return {_line, kTokenPlus, "+"};
      }
    case '-':
      if (test("--")) {
        walk(2);
        return {_line, kTokenSelfDec, "--"};
      } else if (test("->")) {
        walk(2);
        return {_line, kTokenOpPointer, "->"};
      } else if (test("-=")) {
        walk(2);
        return {_line, kTokenOpSubAssign, "-="};
      } else {
        walk(1);
        return {_line, kTokenMinus, "-"};
      }
    case '!':
      if (test("!=")) {
        walk(2);
        return {_line, kTokenOpNotEq, "!="};
      } else {
        walk(1);
        return {_line, kTokenOpLogicNot, "!"};
      }
    case '*':
      if (test("*=")) {
        walk(2);
        return {_line, kTokenOpMultAssign, "*="};
      } else {
        walk(1);
        return {_line, kTokenStar, "*"};
      }
    case '/':
      if (test("/=")) {
        // Fix: "/=" is two characters; the old code only walked 1, which
        // left the '=' in the stream and yielded a bogus '=' token next.
        walk(2);
        return {_line, kTokenOpDivAssign, "/="};
      } else {
        walk(1);
        return {_line, kTokenOpDiv, "/"};
      }
    case '=':
      if (test("==")) {
        walk(2);
        return {_line, kTokenOpEqual, "=="};
      } else {
        walk(1);
        return {_line, kTokenOpAssign, "="};
      }
    case '<':
      if (test("<<=")) {
        walk(3);
        return {_line, kTokenOpShiftLeftAssign, "<<="};
      } else if (test("<=")) {
        walk(2);
        return {_line, kTokenOpLessEq, "<="};
      } else if (test("<<")) {
        walk(2);
        return {_line, kTokenOpShiftLeft, "<<"};
      } else {
        walk(1);
        return {_line, kTokenOpLess, "<"};
      }
    case '>':
      if (test(">>=")) {
        walk(3);
        return {_line, kTokenOpShiftRightAssign, ">>="};
      } else if (test(">=")) {
        walk(2);
        return {_line, kTokenOpGreatEq, ">="};
      } else if (test(">>")) {
        walk(2);
        return {_line, kTokenOpShiftRight, ">>"};
      } else {
        walk(1);
        return {_line, kTokenOpGreat, ">"};
      }
    case '^':
      if (test("^=")) {
        walk(2);
        return {_line, kTokenOpBitXorAssign, "^="};
      } else {
        walk(1);
        return {_line, kTokenOpBitXor, "^"};
      }
    case '&':
      if (test("&=")) {
        walk(2);
        return {_line, kTokenOpBitAndAssign, "&="};
      } else if (test("&&")) {
        walk(2);
        return {_line, kTokenOpLogicAnd, "&&"};
      } else {
        walk(1);
        return {_line, kTokenAmp, "&"};
      }
    case '"':
      return {_line, kTokenLiteralString, scanString()};
  }
  char c = _chunk[0];
  if (c == '_' || is_lattin(c)) {
    std::string s = scan(reIdentifier);
    // Fix: scan() already advances _col through walk(); the old code added
    // s.size() again here, double-counting columns.
    // Fix: single find() instead of count() + operator[] -- one lookup,
    // and operator[] is not usable with this map's const mapped type.
    auto it = keywords.find(s);
    if (it != keywords.end()) return {_line, it->second, s};
    return {_line, kTokenIdentifier, s};
  }

  if (c == '.' || is_digit(c)) {
    std::string num = scan(reNumber);
    return {_line, kTokenLiteraltInteger, num};
  }

  panic("unknown char");
  return {0, kTokenEof, ""};
}

// Returns the next token, consuming any token cached by look_ahead().
// The returned token is also remembered in _peek.
Token Lexer::next_token() {
  if (!_cached) {
    _peek = do_get_token();
    return _peek;
  }
  _cached = false;
  _peek = _cache_token;
  return _peek;
}

// Reads one character of a char/string literal, decoding backslash
// escapes, and advances past it. Aborts via panic() on a malformed or
// truncated escape sequence.
char Lexer::scanChar() {
  // Fix: guard against end of input -- the old code dereferenced
  // _chunk[0]/_chunk[1] unconditionally.
  if (_left == 0) panic("unexpected end of input in literal");

  if (_chunk[0] != '\\') {
    char c = _chunk[0];
    walk(1);
    return c;
  }

  if (_left < 2) panic("incomplete escape sequence");

  switch (_chunk[1]) {
    case 'a':
      walk(2);
      return '\a';
    case 'b':
      walk(2);
      return '\b';
    case 'f':
      walk(2);
      return '\f';
    case 'n':
      walk(2);
      return '\n';
    case 'r':
      walk(2);
      return '\r';
    case 't':
      walk(2);
      return '\t';
    case 'v':
      walk(2);
      return '\v';
    case '"':
      walk(2);
      return '"';
    case '\'':
      walk(2);
      return '\'';
    case '\\':
      walk(2);
      return '\\';
    case '0':
      // NOTE(review): only "\0" is supported; general octal escapes
      // ("\123") fall through to panic below.
      walk(2);
      return '\0';
    case 'x': {
      // "\x" followed by one or two hex digits.
      walk(2);
      // Fix: the old code read past the buffer and silently folded an
      // invalid first digit (to_hex == -1) into the result.
      if (_left == 0 || to_hex(_chunk[0]) < 0) panic("invalid hex escape");
      int a = to_hex(_chunk[0]);
      walk(1);
      if (_left > 0 && to_hex(_chunk[0]) >= 0) {
        a = a * 16 + to_hex(_chunk[0]);
        walk(1);
      }
      return static_cast<char>(a);
    }
    default:
      break;
  }
  panic(__FUNCTION__);
  return '\0';
}

std::string Lexer::scanString() {
  std::string r;
  walk(1);
  while (!test("\"")) {
    r += scanChar();
  }
  assert(_chunk[0] == '"');
  walk(1);
  return r;
}

std::string Lexer::scan(const std::regex& reg) {
  std::cmatch m;

  bool ok = std::regex_search(_chunk, m, reg);
  if (ok && m.length() > 0) {
    walk(m.length());
    return m.str();
  }
  return "";
}

// Consumes the next token and requires it to be of `kind`; aborts via
// panic() otherwise. Returns the consumed token.
Token Lexer::next_token_of_kind(int kind) {
  const Token t = next_token();
  if (kind != t.kind) {
    panic("");
  }
  return t;
}

// Consumes the next token, requiring it to be an identifier (aborts via
// panic() inside next_token_of_kind otherwise).
Token Lexer::next_identifier() { return next_token_of_kind(kTokenIdentifier); }

// Returns the kind of the upcoming token without consuming it. The token
// is cached so a subsequent next_token() returns the same token.
int Lexer::look_ahead() {
  if (_cached) return _cache_token.kind;
  _cache_token = do_get_token();
  _cached = true;
  return _cache_token.kind;
}
