import { createLexer } from "../syntax-parser";

/**
 * SQL tokenizer built on the shared lexer factory.
 *
 * Rules are tried in array order, so ordering matters: whitespace and
 * comments are consumed and dropped first, then UDF resource locations,
 * quoted strings, operators/punctuation, numeric literals, and finally
 * bare words (including `${...}` / `$[...]` date-placeholder words).
 */
export const sqlTokenizer = createLexer([
  {
    // Runs of whitespace; matched and discarded.
    type: "whitespace",
    regexes: [/^(\s+)/],
    ignore: true,
  },
  {
    type: "comment",
    regexes: [
      /^((?:#|--).*?(?:\n|$))/, // line comments: "# ..." or "-- ..."
      /^(\/\*[^]*?(?:\*\/|$))/, // block comments: /* ... */ (also unterminated at EOF)
    ],
    ignore: true,
  },
  {
    // UDF resource locations, e.g. "JAR /path/to/udf.jar" or "FILE a/b.py".
    type: "udflocation",
    regexes: [
      /^((FILE|JAR)\s+[a-zA-Z0-9_\-\/:\.]+)/i,
    ],
  },
  {
    // Quoted strings with backslash escapes. There is no `|$` fallback,
    // so an UNTERMINATED quote deliberately does not match here and
    // falls through to the later rules.
    type: "string",
    regexes: [
      /^((?=")(?:"[^"\\]*(?:\\[\s\S][^"\\]*)*"))/, // "double-quoted"
      /^((?=')(?:'[^'\\]*(?:\\[\s\S][^'\\]*)*'))/, // 'single-quoted'
      /^((?=`)(?:`[^`\\]*(?:\\[\s\S][^`\\]*)*`))/, // `backtick-quoted`
    ],
  },
  {
    type: "special",
    regexes: [
      // Multi-character operators must come before their single-character
      // prefixes so e.g. "<=" is not split into "<" and "=".
      /^(!=|<>|==|<=|>=|!<|!>|\|\||::|->>|->|~~\*|~~|!~~\*|!~~|~\*|!~\*|!~|\.|,)/,
      /^(\+|\-|\*|\/|\=)/, // arithmetic: + - * / =
      /^(\(|\))/, // parentheses
      /^(\<|\>)/, // comparison: < >
      /^(\[|\])/, // brackets
    ],
  },
  {
    // Decimal (with optional fraction), hex (0x...) and binary (0b...)
    // literals. Alternation order is safe: on "0x1F" the first branch
    // matches only "0", fails the trailing \b, and the engine backtracks
    // into the 0x branch.
    type: "number",
    regexes: [/^([0-9]+(\.[0-9]+)?|0x[0-9a-fA-F]+|0b[01]+)\b/],
  },
  {
    type: "word",
    regexes: [
      // Words containing ${word} and/or $[monday(yyyyMMdd,-1d)] date
      // placeholders, mixed in any order; at least one placeholder must
      // be present.
      /^([\w\$@]*(((\$\[[\w\(\),\-\+]+\])|(\$\{[\w]+\}))[\w\$@]*)+)/,
      // Plain words. FIX: the classes previously used `%-@`, where the
      // unescaped hyphen formed the range %..@ and silently pulled
      // operator characters (& ' ( ) * + , . / : ; < = > ?) into word
      // tokens; the hyphen is now a literal, placed last in the class.
      /^([a-zA-Z0-9_%@-]+?)(?=[\(\!\=\<\-\~\*,:\)\>\;\[\]\+\/\.])/, // word ending at an operator
      /^([a-zA-Z0-9_%@\$-]+)/, // bare word
    ],
  },
]);
