/*
Copyright 2024 MySQL Parser Project

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sqlparser

import (
	"errors"
	"fmt"
	"strings"
)

// Token identifies a lexical token class produced by the Tokenizer.
// Concrete values are assigned by the const block below via iota;
// Scan returns them converted to int.
type Token int

// Token constants.
//
// Values are assigned sequentially with iota, so the declaration order below
// is significant: reordering, inserting, or removing an entry renumbers every
// token that follows it. Do not reorder.
const (
	// Special tokens
	EOF Token = iota // EOF == 0, matching Scan's (0, "") end-of-input return
	ERROR
	COMMENT
	COMMENT_KEYWORD
	ID
	HEX
	STRING
	INTEGRAL
	FLOAT
	HEXNUM
	VALUE_ARG
	LIST_ARG
	NULL
	TRUE
	FALSE

	// Operators
	LE     // <=
	GE     // >=
	NE     // !=
	NULL_SAFE_EQUAL // <=>
	SHIFT_LEFT      // <<
	SHIFT_RIGHT     // >>
	DIV_INT         // DIV
	MOD             // %
	BIT_AND         // &
	BIT_OR          // |
	BIT_XOR         // ^
	PLUS            // +
	MINUS           // -
	MULT            // *
	DIV             // /

	// Logical operators
	AND
	OR
	NOT
	BETWEEN
	CASE
	WHEN
	THEN
	ELSE
	END
	IF
	ELSEIF

	// Comparison
	EQ // =
	LT // <
	GT // >
	LIKE
	REGEXP
	IN
	IS
	EXISTS

	// Keywords
	SELECT
	INSERT
	UPDATE
	DELETE
	FROM
	WHERE
	GROUP
	HAVING
	ORDER
	BY
	LIMIT
	OFFSET
	FOR
	ALL
	DISTINCT
	AS
	EXIST
	ASC
	DESC
	INTO
	DUPLICATE
	KEY
	DEFAULT
	SET
	LOCK
	KEYS
	VALUES
	LAST_INSERT_ID
	NEXT
	VALUE
	SHARE
	MODE
	SQL_NO_CACHE
	SQL_CACHE
	JOIN
	STRAIGHT_JOIN
	LEFT
	RIGHT
	INNER
	OUTER
	CROSS
	NATURAL
	USE
	FORCE
	ON
	USING
	UNION
	INTERSECT
	EXCEPT
	CREATE
	ALTER
	DROP
	RENAME
	ANALYZE
	ADD
	SCHEMA
	TABLE
	INDEX
	VIEW
	TO
	IGNORE
	IF_EXISTS
	IF_NOT_EXISTS
	REPLACE
	DISTINCTROW
	HIGH_PRIORITY
	LOW_PRIORITY
	DELAYED
	QUICK
	SQL_SMALL_RESULT
	SQL_BIG_RESULT
	SQL_BUFFER_RESULT
	SQL_CALC_FOUND_ROWS
	MATCH
	AGAINST
	BINARY
	COLLATE
	CHARSET
	CHARACTER
	INTERVAL
	REGEX
	NULLS
	FIRST
	LAST
	UNKNOWN

	// DDL Keywords
	COLUMN
	CONSTRAINT
	PRIMARY
	FOREIGN
	REFERENCES
	UNIQUE
	CHECK
	AUTO_INCREMENT
	UNSIGNED
	ZEROFILL
	ENGINE
	TEMPORARY
	MODIFY
	CHANGE
	AFTER
	CASCADE
	RESTRICT
	TRUNCATE

	// Transaction Control Keywords
	BEGIN
	COMMIT
	ROLLBACK

	// Data Types
	INT
	INTEGER
	BIGINT
	SMALLINT
	TINYINT
	MEDIUMINT
	DECIMAL
	NUMERIC
	FLOAT_TYPE
	DOUBLE
	REAL
	BIT
	BOOLEAN
	BOOL
	CHAR
	VARCHAR
	TEXT
	TINYTEXT
	MEDIUMTEXT
	LONGTEXT
	VARBINARY
	BLOB
	TINYBLOB
	MEDIUMBLOB
	LONGBLOB
	DATE
	TIME
	DATETIME
	TIMESTAMP
	YEAR
	JSON
	GEOMETRY
	POINT
	LINESTRING
	POLYGON
	ENUM

	// Punctuation.
	// NOTE(review): EQUAL/LESS/GREATER/PLUS_SIGN/MINUS_SIGN/MULT_SIGN/
	// DIV_SIGN/MOD_SIGN/AMP/PIPE/CARET duplicate the operator tokens above
	// and are never produced by Scan in this file (it emits EQ, LT, GT,
	// PLUS, ... instead) — confirm whether they are still needed.
	LPAREN    // (
	RPAREN    // )
	COMMA     // ,
	SEMICOLON // ;
	DOT       // .
	EQUAL     // =
	LESS      // <
	GREATER   // >
	EXCLAM    // !
	TILDE     // ~
	QUESTION  // ?
	PLUS_SIGN // +
	MINUS_SIGN // -
	MULT_SIGN  // *
	DIV_SIGN   // /
	MOD_SIGN   // %
	AMP        // &
	PIPE       // |
	CARET      // ^
	LBRACE     // {
	RBRACE     // }
)

// Tokenizer is the struct used to generate SQL tokens for the parser.
// It scans the input string buf byte by byte; pos always points at the next
// unread byte. It supports a one-token pushback via PushBack/Scan.
type Tokenizer struct {
	buf         string // full SQL text being tokenized
	pos         int    // byte offset of the next unread character in buf
	lastChar    uint16 // NOTE(review): never read or written in this file — confirm still needed
	lastToken   Token  // NOTE(review): only cleared by Reset, never set — confirm still needed
	lastError   error  // first lexical error; once set, Scan keeps returning ERROR
	posVarIndex int    // NOTE(review): unused in this file — presumably numbers '?' placeholders; verify
	ParseTree   Statement // parse result, filled in by the parser (not by the tokenizer)
	PartialDDL  Statement // partially parsed DDL statement, filled in by the parser
	// For token pushback: the most recently scanned token, replayed by Scan
	// when hasPushedBack is true.
	lastTokenType int
	lastTokenValue string
	hasPushedBack bool
}

// NewStringTokenizer returns a Tokenizer that reads tokens from the given
// SQL string, positioned at the start of the input.
func NewStringTokenizer(sql string) *Tokenizer {
	tkn := &Tokenizer{}
	tkn.buf = sql
	return tkn
}

// keywords maps lowercase SQL words to their token values. scanIdentifier
// lowercases each identifier and looks it up here; words not present lex as ID.
//
// NOTE(review): several tokens declared in the const block have no entry here
// and therefore currently lex as ID — e.g. "date", "time", "timestamp",
// "float", "bit" (DATE/TIME/TIMESTAMP/FLOAT_TYPE/BIT), "straight_join",
// "distinctrow", and the IF_EXISTS/IF_NOT_EXISTS family. Confirm whether
// that is intended or an omission.
var keywords = map[string]Token{
	"select":     SELECT,
	"insert":     INSERT,
	"update":     UPDATE,
	"delete":     DELETE,
	"from":       FROM,
	"where":      WHERE,
	"group":      GROUP,
	"having":     HAVING,
	"order":      ORDER,
	"by":         BY,
	"limit":      LIMIT,
	"offset":     OFFSET,
	"for":        FOR,
	"all":        ALL,
	"distinct":   DISTINCT,
	"as":         AS,
	"exist":      EXIST,
	"asc":        ASC,
	"desc":       DESC,
	"into":       INTO,
	"duplicate":  DUPLICATE,
	"key":        KEY,
	"default":    DEFAULT,
	"set":        SET,
	"lock":       LOCK,
	"keys":       KEYS,
	"values":     VALUES,
	"join":       JOIN,
	"left":       LEFT,
	"right":      RIGHT,
	"inner":      INNER,
	"outer":      OUTER,
	"cross":      CROSS,
	"natural":    NATURAL,
	"use":        USE,
	"force":      FORCE,
	"on":         ON,
	"using":      USING,
	"union":      UNION,
	"intersect":  INTERSECT,
	"except":     EXCEPT,
	"create":     CREATE,
	"alter":      ALTER,
	"drop":       DROP,
	"rename":     RENAME,
	"analyze":    ANALYZE,
	"add":        ADD,
	"schema":     SCHEMA,
	"table":      TABLE,
	"index":      INDEX,
	"view":       VIEW,
	"to":         TO,
	"ignore":     IGNORE,
	"replace":    REPLACE,
	"and":        AND,
	"or":         OR,
	"not":        NOT,
	"between":    BETWEEN,
	"case":       CASE,
	"when":       WHEN,
	"then":       THEN,
	"else":       ELSE,
	"end":        END,
	"if":         IF,
	"like":       LIKE,
	"regexp":     REGEXP,
	"in":         IN,
	"is":         IS,
	"exists":     EXISTS,
	"null":       NULL,
	"true":       TRUE,
	"false":      FALSE,
	"div":        DIV_INT,
	"mod":        MOD,
	"match":      MATCH,
	"against":    AGAINST,
	"binary":     BINARY,
	"collate":    COLLATE,
	"charset":    CHARSET,
	"character":  CHARACTER,
	"interval":   INTERVAL,
	"regex":      REGEX,
	"nulls":      NULLS,
	"first":      FIRST,
	"last":       LAST,
	"unknown":    UNKNOWN,
	// DDL specific keywords
	"column":     COLUMN,
	"constraint": CONSTRAINT,
	"primary":    PRIMARY,
	"foreign":    FOREIGN,
	"references": REFERENCES,
	"unique":     UNIQUE,
	"check":      CHECK,
	"auto_increment": AUTO_INCREMENT,
	"unsigned":   UNSIGNED,
	"zerofill":   ZEROFILL,
	"engine":     ENGINE,
	"temporary":  TEMPORARY,
	"modify":     MODIFY,
	"change":     CHANGE,
	"after":      AFTER,
	"cascade":    CASCADE,
	"restrict":   RESTRICT,
	"truncate":   TRUNCATE,
	// Transaction control keywords
	"begin":      BEGIN,
	"commit":     COMMIT,
	"rollback":   ROLLBACK,
	// Data types
	"int":        INT,
	"integer":    INTEGER,
	"bigint":     BIGINT,
	"smallint":   SMALLINT,
	"tinyint":    TINYINT,
	"mediumint":  MEDIUMINT,
	"decimal":    DECIMAL,
	"numeric":    NUMERIC,
	"double":     DOUBLE,
	"real":       REAL,
	"boolean":    BOOLEAN,
	"bool":       BOOL,
	"char":       CHAR,
	"varchar":    VARCHAR,
	"text":       TEXT,
	"tinytext":   TINYTEXT,
	"mediumtext": MEDIUMTEXT,
	"longtext":   LONGTEXT,
	"varbinary":  VARBINARY,
	"blob":       BLOB,
	"tinyblob":   TINYBLOB,
	"mediumblob": MEDIUMBLOB,
	"longblob":   LONGBLOB,
	"datetime":   DATETIME,
	"year":       YEAR,
	"json":       JSON,
	"geometry":   GEOMETRY,
	"point":      POINT,
	"linestring": LINESTRING,
	"polygon":    POLYGON,
	"enum":       ENUM,
}

// Scan returns the next token's type and its exact source text. String
// values include their surrounding quotes and quoted identifiers their
// backticks. At end of input it returns (int(EOF), ""); on a lexical error
// it records the error and returns (int(ERROR), text) — once an error is
// recorded, every subsequent call keeps returning ERROR until Reset.
//
// Every scanned token is recorded in lastTokenType/lastTokenValue so that
// PushBack can replay it. (Bug fix: previously only operator/punctuation
// tokens were recorded; strings, numbers, comments and quoted identifiers
// returned early, so PushBack after one of those replayed a stale token.)
func (tkn *Tokenizer) Scan() (int, string) {
	if tkn.lastError != nil {
		return int(ERROR), tkn.lastError.Error()
	}

	// Replay a pushed-back token, if any.
	if tkn.hasPushedBack {
		tkn.hasPushedBack = false
		return tkn.lastTokenType, tkn.lastTokenValue
	}

	tkn.skipBlank()
	if tkn.pos >= len(tkn.buf) {
		return int(EOF), "" // EOF == 0
	}

	ch := tkn.buf[tkn.pos]
	tkn.pos++

	var tokenType int
	var tokenValue string

	// No whitespace cases are needed below: skipBlank already consumed them.
	switch ch {
	case '(':
		tokenType, tokenValue = int(LPAREN), string(ch)
	case ')':
		tokenType, tokenValue = int(RPAREN), string(ch)
	case ',':
		tokenType, tokenValue = int(COMMA), string(ch)
	case ';':
		tokenType, tokenValue = int(SEMICOLON), string(ch)
	case '.':
		tokenType, tokenValue = int(DOT), string(ch)
	case '+':
		tokenType, tokenValue = int(PLUS), string(ch)
	case '-':
		if tkn.pos < len(tkn.buf) && tkn.buf[tkn.pos] == '-' {
			// "--" line comment.
			tokenType, tokenValue = tkn.scanCommentType1("--")
		} else {
			tokenType, tokenValue = int(MINUS), string(ch)
		}
	case '*':
		tokenType, tokenValue = int(MULT), string(ch)
	case '/':
		if tkn.pos < len(tkn.buf) && tkn.buf[tkn.pos] == '*' {
			// "/* ... */" block comment.
			tokenType, tokenValue = tkn.scanCommentType2()
		} else {
			tokenType, tokenValue = int(DIV), string(ch)
		}
	case '%':
		tokenType, tokenValue = int(MOD), string(ch)
	case '&':
		tokenType, tokenValue = int(BIT_AND), string(ch)
	case '|':
		tokenType, tokenValue = int(BIT_OR), string(ch)
	case '^':
		tokenType, tokenValue = int(BIT_XOR), string(ch)
	case '~':
		tokenType, tokenValue = int(TILDE), string(ch)
	case '?':
		tokenType, tokenValue = int(QUESTION), string(ch)
	case '=':
		tokenType, tokenValue = int(EQ), string(ch)
	case '<':
		// One of "<", "<=", "<=>", "<<", "<>".
		switch {
		case tkn.pos < len(tkn.buf) && tkn.buf[tkn.pos] == '=':
			tkn.pos++
			if tkn.pos < len(tkn.buf) && tkn.buf[tkn.pos] == '>' {
				tkn.pos++
				tokenType, tokenValue = int(NULL_SAFE_EQUAL), "<=>"
			} else {
				tokenType, tokenValue = int(LE), "<="
			}
		case tkn.pos < len(tkn.buf) && tkn.buf[tkn.pos] == '<':
			tkn.pos++
			tokenType, tokenValue = int(SHIFT_LEFT), "<<"
		case tkn.pos < len(tkn.buf) && tkn.buf[tkn.pos] == '>':
			tkn.pos++
			tokenType, tokenValue = int(NE), "<>"
		default:
			tokenType, tokenValue = int(LT), string(ch)
		}
	case '>':
		// One of ">", ">=", ">>".
		switch {
		case tkn.pos < len(tkn.buf) && tkn.buf[tkn.pos] == '=':
			tkn.pos++
			tokenType, tokenValue = int(GE), ">="
		case tkn.pos < len(tkn.buf) && tkn.buf[tkn.pos] == '>':
			tkn.pos++
			tokenType, tokenValue = int(SHIFT_RIGHT), ">>"
		default:
			tokenType, tokenValue = int(GT), string(ch)
		}
	case '!':
		if tkn.pos < len(tkn.buf) && tkn.buf[tkn.pos] == '=' {
			tkn.pos++
			tokenType, tokenValue = int(NE), "!="
		} else {
			tokenType, tokenValue = int(EXCLAM), string(ch)
		}
	case '\'':
		tokenType, tokenValue = tkn.scanString('\'')
	case '"':
		tokenType, tokenValue = tkn.scanString('"')
	case '`':
		tokenType, tokenValue = tkn.scanLiteralIdentifier()
	case '#':
		// "#" line comment.
		tokenType, tokenValue = tkn.scanCommentType1("#")
	default:
		if isLetter(ch) {
			tokenType, tokenValue = tkn.scanIdentifier()
		} else if isDigit(ch) {
			tokenType, tokenValue = tkn.scanNumber(false)
		} else {
			tkn.lastError = fmt.Errorf("illegal character: %c", ch)
			return int(ERROR), string(ch)
		}
	}

	// Record the token so PushBack can replay it — for every path, not just
	// the punctuation cases.
	tkn.lastTokenType = tokenType
	tkn.lastTokenValue = tokenValue
	return tokenType, tokenValue
}

// skipBlank advances pos past any run of space, tab, newline, and
// carriage-return characters.
func (tkn *Tokenizer) skipBlank() {
	for tkn.pos < len(tkn.buf) {
		switch tkn.buf[tkn.pos] {
		case ' ', '\t', '\n', '\r':
			tkn.pos++
		default:
			return
		}
	}
}

// scanIdentifier scans an identifier or keyword. The first character was
// already consumed by Scan; subsequent letters, digits, '_' and '$' are
// accepted. The lowercased word is looked up in keywords; matches return the
// keyword token, anything else returns ID. The original (non-lowercased)
// text is returned as the value.
func (tkn *Tokenizer) scanIdentifier() (int, string) {
	start := tkn.pos - 1
	for tkn.pos < len(tkn.buf) {
		c := tkn.buf[tkn.pos]
		if !isLetter(c) && !isDigit(c) && c != '_' && c != '$' {
			break
		}
		tkn.pos++
	}
	word := tkn.buf[start:tkn.pos]

	tokenType := int(ID)
	if kw, found := keywords[strings.ToLower(word)]; found {
		tokenType = int(kw)
	}

	// Record the token so PushBack can replay it.
	tkn.lastTokenType = tokenType
	tkn.lastTokenValue = word
	return tokenType, word
}

// scanString scans a string literal quoted by delim. The opening delimiter
// was already consumed by Scan; the returned value includes both delimiters.
// A doubled delimiter ('' or "") and a backslash escape each keep the scan
// inside the string. If the input ends before the closing delimiter, an
// "unterminated string" error is recorded.
func (tkn *Tokenizer) scanString(delim byte) (int, string) {
	start := tkn.pos - 1 // include the opening delimiter
	for tkn.pos < len(tkn.buf) {
		c := tkn.buf[tkn.pos]
		tkn.pos++
		switch {
		case c == delim:
			if tkn.pos < len(tkn.buf) && tkn.buf[tkn.pos] == delim {
				tkn.pos++ // doubled delimiter: still inside the string
				continue
			}
			return int(STRING), tkn.buf[start:tkn.pos]
		case c == '\\' && tkn.pos < len(tkn.buf):
			tkn.pos++ // skip the escaped character
		}
	}
	tkn.lastError = errors.New("unterminated string")
	return int(ERROR), tkn.buf[start:]
}

// scanLiteralIdentifier scans a backtick-quoted identifier. The opening
// backtick was already consumed by Scan; the returned value includes both
// backticks. A doubled backtick (``) is an escape and keeps the scan inside
// the identifier. Records an error if the closing backtick is missing.
func (tkn *Tokenizer) scanLiteralIdentifier() (int, string) {
	start := tkn.pos - 1 // include the opening backtick
	for tkn.pos < len(tkn.buf) {
		c := tkn.buf[tkn.pos]
		tkn.pos++
		if c != '`' {
			continue
		}
		if tkn.pos < len(tkn.buf) && tkn.buf[tkn.pos] == '`' {
			tkn.pos++ // escaped backtick: still inside the identifier
			continue
		}
		return int(ID), tkn.buf[start:tkn.pos]
	}
	tkn.lastError = errors.New("unterminated quoted identifier")
	return int(ERROR), tkn.buf[start:]
}

// scanNumber scans a numeric literal. The first character was already
// consumed by Scan. Returns INTEGRAL for plain integers and FLOAT once a
// decimal point or exponent has been seen.
//
// seenDecimalPoint is true when the caller already consumed a leading '.',
// i.e. the literal started with a decimal point.
//
// Bug fix: the exponent was previously gated on !seenDecimalPoint, so
// "1.5e3" stopped scanning at the 'e' (yielding FLOAT "1.5" followed by a
// spurious identifier "e3"). An exponent is now accepted exactly once,
// independent of the decimal point, and neither a second exponent nor a '.'
// after the exponent is consumed.
func (tkn *Tokenizer) scanNumber(seenDecimalPoint bool) (int, string) {
	start := tkn.pos - 1
	token := INTEGRAL
	if seenDecimalPoint {
		// The literal began with '.', so it is already fractional.
		token = FLOAT
	}
	seenExponent := false

	for tkn.pos < len(tkn.buf) {
		ch := tkn.buf[tkn.pos]
		switch {
		case isDigit(ch):
			tkn.pos++
		case ch == '.' && !seenDecimalPoint && !seenExponent:
			// Fractional part, e.g. "3.14".
			token = FLOAT
			seenDecimalPoint = true
			tkn.pos++
		case (ch == 'e' || ch == 'E') && !seenExponent:
			// Exponent part, e.g. "1.5e3" or "2E-7". An optional sign may
			// immediately follow the 'e'/'E'.
			token = FLOAT
			seenExponent = true
			tkn.pos++
			if tkn.pos < len(tkn.buf) && (tkn.buf[tkn.pos] == '+' || tkn.buf[tkn.pos] == '-') {
				tkn.pos++
			}
		default:
			return int(token), tkn.buf[start:tkn.pos]
		}
	}

	return int(token), tkn.buf[start:tkn.pos]
}

// scanCommentType1 scans a line comment introduced by "--" or "#". Scan has
// consumed only the first character of the prefix, so pos is rewound by
// len(prefix) to include the whole prefix in the returned text. The comment
// extends through the terminating newline (included) or end of input.
func (tkn *Tokenizer) scanCommentType1(prefix string) (int, string) {
	start := tkn.pos - len(prefix)
	if start < 0 {
		start = 0
	}
	for tkn.pos < len(tkn.buf) {
		c := tkn.buf[tkn.pos]
		tkn.pos++
		if c == '\n' {
			break
		}
	}
	return int(COMMENT), tkn.buf[start:tkn.pos]
}

// scanCommentType2 scans a block comment ("/* ... */"). Scan has consumed
// the '/' and verified that the next byte is '*'; the returned text includes
// the full delimiters. Records an error if the closing "*/" is missing.
func (tkn *Tokenizer) scanCommentType2() (int, string) {
	start := tkn.pos - 1 // position of the opening '/'
	tkn.pos++            // step over the '*' the caller already verified
	for tkn.pos+1 < len(tkn.buf) {
		if tkn.buf[tkn.pos] == '*' && tkn.buf[tkn.pos+1] == '/' {
			tkn.pos += 2
			return int(COMMENT), tkn.buf[start:tkn.pos]
		}
		tkn.pos++
	}
	tkn.lastError = errors.New("unterminated comment")
	return int(ERROR), tkn.buf[start:]
}

// isLetter reports whether ch is an ASCII letter or underscore. Underscore
// counts as a letter so identifiers may begin with it.
func isLetter(ch byte) bool {
	switch {
	case 'a' <= ch && ch <= 'z':
		return true
	case 'A' <= ch && ch <= 'Z':
		return true
	default:
		return ch == '_'
	}
}

// isDigit reports whether ch is an ASCII decimal digit.
func isDigit(ch byte) bool {
	switch {
	case ch < '0', ch > '9':
		return false
	}
	return true
}

// Error records s as the tokenizer's lexical error. (Despite the old comment,
// this sets — not returns — the error; once set, Scan keeps returning ERROR
// until Reset.) The signature matches the goyacc yyLexer error hook.
func (tkn *Tokenizer) Error(s string) {
	tkn.lastError = errors.New(s)
}

// Position returns the current scan position as a byte offset into the input.
func (tkn *Tokenizer) Position() int {
	return tkn.pos
}

// PushBack arranges for the next Scan call to replay the most recently
// recorded token (lastTokenType/lastTokenValue) instead of reading new input.
// Only a single token of pushback is supported; calling PushBack twice
// without an intervening Scan has no additional effect.
func (tkn *Tokenizer) PushBack() {
	tkn.hasPushedBack = true
}

// Reset rewinds the tokenizer to the start of its input and clears the
// recorded error, last token, and pushback state so the same buffer can be
// scanned again from the beginning.
func (tkn *Tokenizer) Reset() {
	tkn.pos = 0
	tkn.hasPushedBack = false
	tkn.lastToken = 0
	tkn.lastError = nil
}