package scanner

import (
	"bytes"
	"fmt"
	"mi/compiler/ast"
	"mi/compiler/diagnostic"
	"mi/compiler/scanner/uni"
	"mi/compiler/token"
	"reflect"
	"strconv"
	"strings"
	"unicode/utf8"
)

// ErrorHandler receives a diagnostic raised during scanning together with the
// position and length of the offending text. pos == -1 means "at the
// scanner's current position" (see Scanner.error).
type ErrorHandler func(msg *diagnostic.Message, pos int, length int)

// TokenFlags is a bit set describing properties of the current token.
type TokenFlags int
const (
	None 				TokenFlags = 0
	// internal
	// NOTE: iota is 1 in this (second) const spec, so the shifted flags
	// start at 1<<1 == 2 and each following name gets the next bit.
	PrecedingLineBreak  TokenFlags = 1 << iota
	// internal
	PrecedingDocumentComment
	// internal
	Unterminated
	// internal
	ExtendedUnicodeEscape
	Scientific        // e.g. `10e2`
	Decimal           // e.g  `0.123`
	Octal             // e.g. `0777`
	HexSpecifier      // e.g. `0x00000000`
	BinarySpecifier   // e.g. `0b0110010000000000`
	OctalSpecifier    // e.g. `0o777`
	// internal
	ContainsSeparator // e.g. `0b1100_0101`
	// internal
	UnicodeEscape
	// internal
	ContainsInvalidEscape    // e.g. `\uhello`
	// internal: combined masks, not new bits
	BinaryOrOctalSpecifier = BinarySpecifier | OctalSpecifier
	// internal
	NumericLiteralFlags = Scientific | Octal | HexSpecifier | BinaryOrOctalSpecifier | ContainsSeparator
	// internal
	TemplateLiteralLikeFlags = ContainsInvalidEscape
)

// Scanner is a hand-written lexer over a byte slice. It exposes the usual
// token / tokenValue / position accessors and supports speculative scanning
// (TryScan / LookHead / ScanRange).
type Scanner struct {
	text               []byte

	// Language version used by identifier start/part classification.
	languageVersion    ast.ScriptTarget

	// Current position (end position of text of current token)
	pos                int

	// end of text
	end                int

	// Start position of whitespace before current token
	startPos           int

	// Start position of text of current token
	tokenPos           int

	// Kind of the current token.
	token              token.Token
	// Cooked value of the current token (string contents, number text, ...).
	tokenValue         string
	// Flag bits accumulated while scanning the current token.
	tokenFlags         TokenFlags

	// When set, Scan() swallows whitespace/comment trivia instead of
	// returning trivia tokens.
	skipTrivia bool
	// When set, the scanner is inside a documentation comment.
	inDocument bool
	// Optional sink for diagnostics; nil silently drops them.
	onError    ErrorHandler
}

// hello has an empty body and no callers in this file.
// NOTE(review): appears to be dead code left over from development — confirm
// nothing outside this file calls it, then remove.
func (s *Scanner) hello() {
}

// Optional carries the configurable construction parameters for NewScanner.
// Callers mutate a defaulted instance through option functions.
type Optional struct {
	LanguageVersion ast.ScriptTarget
	SkipTrivia      bool
	InDocument      bool
	Text            []byte
	OnError         ErrorHandler
	// Start of the scan range; -1 means "from offset 0" (see SetText).
	Start           int
	// Length of the scan range; -1 means "through the end of Text".
	Length          int
}

// NewScanner constructs a Scanner. Each supplied function mutates a
// defaulted Optional before the scanner is wired up, functional-options
// style.
func NewScanner(funs ...func(opt *Optional)) *Scanner {
	opt := &Optional{
		LanguageVersion: ast.Java8,
		SkipTrivia:      false,
		InDocument:      false,
		Start:           -1, // zero if not set
		Length:          -1, // text length if not set
	}
	for _, fun := range funs {
		fun(opt)
	}

	var scanner = new(Scanner)
	scanner.languageVersion = opt.LanguageVersion
	scanner.skipTrivia = opt.SkipTrivia
	// Fix: InDocument was accepted in Optional but never copied onto the
	// scanner, so the option was silently ignored.
	scanner.inDocument = opt.InDocument
	scanner.onError = opt.OnError
	scanner.SetText(opt.Text, opt.Start, opt.Length)
	return scanner
}

// SetOnError installs the handler invoked for scan diagnostics; pass nil to
// silence them.
func (s *Scanner) SetOnError(fun ErrorHandler){
	s.onError = fun
}

// error reports msg at the scanner's current position (pos -1, length 0)
// through the registered handler, if any.
func (s *Scanner) error(msg *diagnostic.Message) {
	if s.onError == nil {
		return
	}
	s.onError(msg, -1, 0)
}

// errorAtPos reports msg for the text span [pos, pos+length) through the
// registered handler, if any.
func (s *Scanner) errorAtPos(msg *diagnostic.Message, pos int, length int) {
	if s.onError == nil {
		return
	}
	s.onError(msg, pos, length)
}

// isIdentifierStart reports whether r may begin an identifier under the
// configured language version.
func (s *Scanner) isIdentifierStart(r rune) bool {
	return IsIdentifierStart(r, s.languageVersion)
}

// isIdentifierPart reports whether r may continue an identifier under the
// configured language version.
func (s *Scanner) isIdentifierPart(r rune) bool {
	return IsIdentifierPart(r, s.languageVersion)
}

// scanNumberFragment consumes a run of decimal digits, permitting '_'
// separators between digits, and returns the digits with all separators
// removed. Misplaced separators (leading, trailing, consecutive) are
// reported via errorAtPos but scanning continues.
func (s *Scanner) scanNumberFragment() string {
	var start = s.pos
	var allowSeparator = false
	var isPreviousTokenSeparator = false
	var underlineStart int
	var result bytes.Buffer

	for s.pos < s.end {
		ch, size := utf8.DecodeRune(s.text[s.pos:])
		if ch == '_' {
			s.tokenFlags |= ContainsSeparator
			if allowSeparator {
				allowSeparator = false
				isPreviousTokenSeparator = true
				// Flush the digit run that precedes the separator.
				result.Write(s.text[start:s.pos])
			} else if isPreviousTokenSeparator {
				s.errorAtPos(diagnostic.M_Multiple_consecutive_numeric_separators_are_not_permitted, s.pos, 1)
			} else {
				s.errorAtPos(diagnostic.M_Numeric_separators_are_not_allowed_here, s.pos, 1)
			}

			underlineStart = s.pos
			s.pos += size
			// Fix: resume copying AFTER the separator. Previously start was
			// reset to the '_' itself, so separators leaked into the
			// returned value ("1_2" came back as "1_2" instead of "12").
			start = s.pos
			continue
		}

		if IsDigit(ch) {
			allowSeparator = true
			isPreviousTokenSeparator = false
			s.pos += size
			continue
		}

		break
	}

	// A trailing separator ("12_") is also invalid.
	if isPreviousTokenSeparator {
		s.errorAtPos(diagnostic.M_Numeric_separators_are_not_allowed_here, underlineStart, 1)
	}

	result.Write(s.text[start:s.pos])
	return result.String()
}

// scanNumber scans a decimal numeric literal (integer, fractional and/or
// scientific forms) starting at the current position. It returns the token
// kind derived from the optional type suffix (see checkNumberSuffix) and the
// literal's text with numeric separators removed.
func (s *Scanner) scanNumber() (token.Token, string) {
	var start = s.pos
	var mainFragment = s.scanNumberFragment()
	var decimalFragment string
	var scientificFragment string
	// Fractional part: '.' followed by digits.
	if tar := s.peekEqual(0, '.'); tar >= 0 {
		s.tokenFlags |= Decimal
		s.pos = tar
		decimalFragment = s.scanNumberFragment()
	}

	var end = s.pos
	// Exponent part: [eE][+-]?digits.
	if tar := s.peekCheck(0, func(ch rune) bool { return ch == 'e' || ch == 'E' }); tar >= 0 {
		s.pos = tar
		s.tokenFlags |= Scientific
		if tar := s.peekCheck(0, func(ch rune) bool { return ch == '+' || ch == '-' }); tar >= 0 {
			s.pos = tar
		}

		var preNumericPart = s.pos
		var finalFragment = s.scanNumberFragment()
		if len(finalFragment) == 0 {
			s.error(diagnostic.M_Digit_expected)
		} else {
			// Raw "e"/"E" plus optional sign, then the cleaned exponent digits.
			scientificFragment = string(s.text[end:preNumericPart]) + finalFragment
			end = s.pos
		}
	}

	var result string
	if s.tokenFlags&ContainsSeparator != 0 {
		// Rebuild from the cleaned fragments so '_' separators are dropped.
		result = mainFragment
		if len(decimalFragment) > 0 {
			result += "." + decimalFragment
		}
		if len(scientificFragment) > 0 {
			result += scientificFragment
		}
	} else {
		result = string(s.text[start:end]) // No need to use all the fragments; no _ removal needed
	}

	s.tokenValue = result
	var kind = s.checkNumberSuffix()
	s.checkForIdentifierStartAfterNumericLiteral()
	return kind, s.tokenValue
}

// checkForIdentifierStartAfterNumericLiteral reports an error when an
// identifier or keyword immediately follows a numeric literal (e.g.
// "123abc"), then rewinds so the identifier is delivered as the next token.
func (s *Scanner) checkForIdentifierStartAfterNumericLiteral() {
	first, _ := s.peek(s.pos)
	if !s.isIdentifierStart(first) {
		return
	}

	identifierStart := s.pos
	length := len(s.scanIdentifierParts())
	s.errorAtPos(diagnostic.M_An_identifier_or_keyword_cannot_immediately_follow_a_numeric_literal, identifierStart, length)
	s.pos = identifierStart
}

// scanOctalDigits consumes the run of octal digits beginning at the current
// position and returns it verbatim.
// !!! support underline
func (s *Scanner) scanOctalDigits() string {
	start := s.pos
	for {
		next := s.peekCheck(0, IsOctalDigit)
		if next < 0 {
			break
		}
		s.pos = next
	}
	return string(s.text[start:s.pos])
}

/**
 * Scans the given number of hexadecimal digits in the text,
 * returning -1 if the given number is unavailable.
 */
func (s *Scanner) scanExactNumberOfHexDigits(count int, canHaveSeparators bool) int {
	valueString := s.scanHexDigits(count, false, canHaveSeparators)
	if len(valueString) == 0 {
		return -1
	}
	value, err := strconv.ParseInt(valueString, 16, 64)
	if err != nil {
		// scanHexDigits only yields [0-9a-f], so this indicates a scanner bug.
		panic(fmt.Sprintf("parse int error, value is (%s).", valueString))
	}
	return int(value)
}

/**
 * Scans as many hexadecimal digits as are available in the text
 * (at least count of them), returning "" if that minimum number of
 * digits was unavailable.
 */
func (s *Scanner) scanMinimumNumberOfHexDigits(count int, canHaveSeparators bool) string {
	return s.scanHexDigits(count, true, canHaveSeparators)
}

// scanHexDigits consumes hexadecimal digits starting at the current position.
// count is the minimum number of digits required; when scanAsManyAsPossible
// is false it is also the maximum. The result is the lowercased digits with
// any '_' separators removed, or "" when fewer than count digits were found.
func (s *Scanner) scanHexDigits(count int, scanAsManyAsPossible bool, canHaveSeparators bool) string {
	var valueChars []rune
	var allowSeparator = false
	var underlineStart int
	var isPreviousTokenSeparator = false
	for (len(valueChars) < count || scanAsManyAsPossible) && s.pos < s.end {
		ch, size := utf8.DecodeRune(s.text[s.pos:])
		if canHaveSeparators && ch == '_' {
			s.tokenFlags |= ContainsSeparator
			if allowSeparator {
				allowSeparator = false
				isPreviousTokenSeparator = true
			} else if isPreviousTokenSeparator {
				s.errorAtPos(diagnostic.M_Multiple_consecutive_numeric_separators_are_not_permitted, s.pos, 1)
			} else {
				s.errorAtPos(diagnostic.M_Numeric_separators_are_not_allowed_here, s.pos, 1)
			}
			underlineStart = s.pos
			s.pos += size
			continue
		}
		allowSeparator = canHaveSeparators
		if ch >= 'A' && ch <= 'F' {
			// Normalize to lowercase so the caller parses a canonical form.
			ch += 'a' - 'A'
		} else if !(ch >= '0' && ch <= '9' || ch >= 'a' && ch <= 'f') {
			// Fix: the upper bound here was 'z', which accepted g..z as
			// "hex digits" and later made strconv.ParseInt fail.
			break
		}

		valueChars = append(valueChars, ch)
		s.pos += size
		isPreviousTokenSeparator = false
	}

	// A trailing separator is invalid.
	if isPreviousTokenSeparator {
		s.errorAtPos(diagnostic.M_Numeric_separators_are_not_allowed_here, underlineStart, 1)
	}
	// Fix: enforce the minimum digit count, so scanExactNumberOfHexDigits
	// really returns -1 for short escapes such as "\u12" (its doc comment
	// promised this, but the check was missing).
	if len(valueChars) < count {
		return ""
	}
	return string(valueChars)
}

// scanString consumes a quoted literal (the current character is the opening
// quote, " or ') and returns its cooked contents with escape sequences
// applied. On EOF or a raw line break the literal is unterminated: an error
// is reported and the Unterminated token flag is set (fix: the flag exists
// and is exposed via IsUnterminated but was never set for strings).
func (s *Scanner) scanString() string {
	ch, size := utf8.DecodeRune(s.text[s.pos:])
	var quote = ch
	s.pos += size

	var contents strings.Builder
	var start = s.pos
	for {
		if s.pos >= s.end {
			contents.Write(s.text[start:s.pos])
			s.tokenFlags |= Unterminated
			s.error(diagnostic.M_Unexpected_end_of_text)
			break
		}

		ch, size = utf8.DecodeRune(s.text[s.pos:])
		if ch == quote {
			contents.Write(s.text[start:s.pos])
			s.pos += size
			break
		}
		if ch == '\\' {
			contents.Write(s.text[start:s.pos])
			contents.WriteString(s.scanEscapeSequence())
			start = s.pos
			continue
		}
		if IsLineBreak(ch) {
			// Leave s.pos on the line break so it is scanned next.
			contents.Write(s.text[start:s.pos])
			s.tokenFlags |= Unterminated
			s.error(diagnostic.M_Unterminated_string_literal)
			break
		}
		s.pos += size
	}
	return contents.String()
}

// scanEscapeSequence is called with s.pos on the backslash of an escape
// sequence. It consumes the escape and returns its cooked value. A backslash
// before a line terminator is a line continuation and yields "".
func (s *Scanner) scanEscapeSequence() string {
	s.pos++ // step over the backslash (always a single byte)
	if s.pos >= s.end {
		s.error(diagnostic.M_Unexpected_end_of_text)
		return ""
	}

	ch, size := utf8.DecodeRune(s.text[s.pos:])
	s.pos += size
	switch ch {
	case '0':
		return "\000"
	case 'b':
		return "\b"
	case 't':
		return "\t"
	case 'n':
		return "\n"
	case 'v':
		return "\v"
	case 'f':
		return "\f"
	case 'r':
		return "\r"
	case '\'':
		return "'"
	case '"':
		return "\""
	case 'u': // '\uDDDD'
		s.tokenFlags |= UnicodeEscape
		return s.scanHexadecimalEscape(4)
	case 'x': // '\xDD'
		return s.scanHexadecimalEscape(2)
	case '\r':
		// Fix: s.pos has already moved past the CR, so a following LF sits
		// at rune-offset 0, not 1. The old peekEqual(1, '\n') looked one
		// character too far and left the LF unconsumed in "\<CR><LF>".
		if tar := s.peekEqual(0, '\n'); tar >= 0 {
			s.pos = tar
		}
		fallthrough
	case '\n', uni.LineSeparator, uni.ParagraphSeparator:
		// Line continuation: the escaped terminator contributes nothing.
		return ""
	default:
		return string(ch)
	}
}

// scanHexadecimalEscape reads exactly numDigits hex digits and returns the
// character they encode, or "" (after reporting an error) when the digits
// are missing or incomplete.
func (s *Scanner) scanHexadecimalEscape(numDigits int) string {
	var escapedValue = s.scanExactNumberOfHexDigits(numDigits, false)

	if escapedValue < 0 {
		s.error(diagnostic.M_Hexadecimal_digit_expected)
		return ""
	}
	// Fix: produce the escaped character itself. strconv.Itoa returned the
	// code point's decimal digits, so "\u0041" cooked to "65" instead of "A".
	return string(rune(escapedValue))
}

// Current character is known to be a backslash. Check for Unicode escape of
// the form '\uXXXX' and return the code point value if a valid escape is
// found; otherwise return -1. The scanner position is left unchanged.
func (s *Scanner) peekUnicodeEscape() rune {
	// Need all six bytes of "\uXXXX" inside the scan range.
	if s.pos+5 < s.end {
		if tar := s.peekEqual(1, 'u'); tar >= 0 {
			var start = s.pos
			// Fix: step past the "\u" prefix before reading digits. The
			// digit scan previously started on the backslash itself and
			// therefore always failed, so this function always returned -1.
			s.pos = tar
			// Separators must be disallowed here: callers advance a fixed
			// six bytes when the escape is accepted.
			var value = s.scanExactNumberOfHexDigits(4, false)
			s.pos = start
			return rune(value)
		}
	}
	return -1
}

// scanIdentifierParts consumes identifier-part characters starting at the
// current position, cooking any '\uXXXX' escapes whose code points are
// themselves identifier parts, and returns the resulting identifier text.
func (s *Scanner) scanIdentifierParts() string {
	var result = ""
	var start = s.pos
	for s.pos < s.end {
		ch, size := utf8.DecodeRune(s.text[s.pos:])
		if s.isIdentifierPart(ch) {
			s.pos += size
		} else if ch == '\\' {
			esc := s.peekUnicodeEscape()
			if !(esc >= 0 && s.isIdentifierPart(esc)) {
				break
			}

			// Fix: flush the pending run up to the backslash (s.pos).
			// The old code sliced to s.startPos — the token's *trivia*
			// start — which yields garbage (or an invalid slice) here.
			result += string(s.text[start:s.pos])
			result += string(esc)
			// Valid Unicode escape is always six characters
			s.pos += 6
			start = s.pos
		} else {
			break
		}
	}
	result += string(s.text[start:s.pos])
	return result
}

// getIdentifierToken classifies s.tokenValue as either a reserved keyword or
// a plain identifier, stores the result in s.token and returns it.
func (s *Scanner) getIdentifierToken() token.Token {
	if kw := token.KeywordFromString(s.tokenValue); kw != token.Unknown {
		s.token = kw
	} else {
		s.token = token.Identifier
	}
	return s.token
}

// peekCheck applies f to the rune at rune-offset n from the current position
// (n == 0 is the current rune). It returns the byte position just past that
// rune when f accepts it, and -1 when f rejects it or the text ends first.
// The scanner position is never modified.
// Note: historically peekCheck is called about six times as often as peekEqual.
func (s *Scanner) peekCheck(n int, f func(ch rune) bool) int {
	cursor := s.pos
	for cursor < s.end {
		ch, size := utf8.DecodeRune(s.text[cursor:])
		cursor += size
		n--
		if n >= 0 {
			continue // still skipping toward the requested offset
		}
		if f(ch) {
			return cursor
		}
		break
	}
	return -1
}

// peek decodes the rune at byte offset pos without advancing, returning the
// rune and its encoded size, or (0, 0) when pos is out of range.
// NOTE(review): this bounds-checks against len(s.text) rather than s.end, so
// inside a restricted ScanRange it can observe text beyond the configured
// range end — confirm whether that is intended.
func (s *Scanner) peek(pos int) (rune, int) {
	if pos < len(s.text) {
		ch, size := utf8.DecodeRune(s.text[pos:])
		return ch, size
	}

	return 0, 0
}

// peekEqual reports whether the rune at rune-offset n from the current
// position (n == 0 is the current rune) equals ch. On a match it returns the
// byte position just past that rune; otherwise -1. The scanner position is
// never modified.
func (s *Scanner) peekEqual(n int, ch rune) int {
	cursor := s.pos
	for cursor < s.end {
		cur, size := utf8.DecodeRune(s.text[cursor:])
		cursor += size
		n--
		if n >= 0 {
			continue // still skipping toward the requested offset
		}
		if cur == ch {
			return cursor
		}
		break
	}
	return -1
}

// checkNumberSuffix classifies the numeric literal just scanned by looking
// at an optional type-suffix character: f/F -> float, d/D -> double,
// l/L -> long (integral literals only). Without a suffix, a literal that
// contained a decimal point is a double and anything else is an int.
func (s *Scanner) checkNumberSuffix() token.Token {
	// Fix: respect s.end. s.text may extend past the configured scan range
	// (see ScanRange), and the old code decoded unconditionally, consuming a
	// suffix character from beyond the range boundary.
	var ch rune
	var size int
	if s.pos < s.end {
		ch, size = utf8.DecodeRune(s.text[s.pos:])
	}
	switch ch {
	case 'f', 'F':
		s.pos += size
		return token.FloatLiteral
	case 'd', 'D':
		s.pos += size
		return token.DoubleLiteral
	}

	if s.tokenFlags&Decimal != 0 {
		return token.DoubleLiteral
	}

	switch ch {
	case 'l', 'L':
		s.pos += size
		return token.LongLiteral
	}

	return token.IntLiteral
}

// Scan reads the next token starting at the current position, skipping
// trivia (whitespace, comments, line breaks) when skipTrivia is set, and
// returns it. startPos/tokenPos/token/tokenFlags (and tokenValue where
// applicable) are updated as a side effect.
//
// Fixes relative to the previous version: the whitespace-run loop stalled at
// position 0; a properly closed multi-line comment with skipTrivia unset
// fell through into the "/=" handling; "/=" advanced with `pos += tar`
// instead of `pos = tar`; "^=" failed to consume its '='.
func (s *Scanner) Scan() token.Token {
	s.startPos = s.pos
	s.tokenFlags = None
	for {
		s.tokenPos = s.pos
		if s.pos >= s.end {
			s.token = token.EndOfFile
			return s.token
		}

		ch, size := utf8.DecodeRune(s.text[s.pos:])
		switch ch {
		case '\n', '\r':
			s.tokenFlags |= PrecedingLineBreak
			if s.skipTrivia && !s.inDocument {
				s.pos += size
				continue
			}
			if ch == '\r' && s.peekEqual(1, '\n') >= 0 {
				// consume both CR and LF
				s.pos += 2
			} else {
				s.pos += size
			}
			s.token = token.NewLineTrivia
			return s.token
		case '\t', '\v', '\f', ' ':
			if s.skipTrivia {
				s.pos += size
				continue
			}
			// Consume the whole run of single-line whitespace. The first
			// character is consumed explicitly so the loop also makes
			// progress when the run starts at position 0 (the previous
			// `tar > 0` guard stalled there and the caller looped forever).
			s.pos += size
			for tar := s.peekCheck(0, IsWhiteSpaceSingleLine); tar >= 0; tar = s.peekCheck(0, IsWhiteSpaceSingleLine) {
				s.pos = tar
			}
			s.token = token.WhitespaceTrivia
			return s.token
		case '!':
			if tar := s.peekEqual(1, '='); tar >= 0 {
				s.pos = tar
				s.token = token.ExclamationEquals
				return s.token
			}
			s.pos += size
			s.token = token.Exclamation
			return s.token
		case '"':
			s.tokenValue = s.scanString()
			s.token = token.StringLiteral
			return s.token
		case '\'':
			s.tokenValue = s.scanString()
			s.token = token.CharLiteral
			return s.token
		case '%':
			if tar := s.peekEqual(1, '='); tar >= 0 {
				s.pos = tar
				s.token = token.PercentEquals
				return s.token
			}
			s.pos += size
			s.token = token.Percent
			return s.token
		case '&':
			if tar := s.peekEqual(1, '&'); tar >= 0 {
				s.pos = tar
				s.token = token.AmpersandAmpersand
				return s.token
			}
			if tar := s.peekEqual(1, '='); tar >= 0 {
				s.pos = tar
				s.token = token.AmpersandEquals
				return s.token
			}
			s.pos += size
			s.token = token.Ampersand
			return s.token
		case '(':
			s.pos += size
			s.token = token.OpenParen
			return s.token
		case ')':
			s.pos += size
			s.token = token.CloseParen
			return s.token
		case '*':
			if tar := s.peekEqual(1, '='); tar >= 0 {
				s.pos = tar
				s.token = token.AsteriskEquals
				return s.token
			}
			s.pos += size
			s.token = token.Asterisk
			return s.token
		case '+':
			if tar := s.peekEqual(1, '+'); tar >= 0 {
				s.pos = tar
				s.token = token.PlusPlus
				return s.token
			}
			if tar := s.peekEqual(1, '='); tar >= 0 {
				s.pos = tar
				s.token = token.PlusEquals
				return s.token
			}
			s.pos += size
			s.token = token.Plus
			return s.token
		case ',':
			s.pos += size
			s.token = token.Comma
			return s.token
		case '-':
			if tar := s.peekEqual(1, '-'); tar >= 0 {
				s.pos = tar
				s.token = token.MinusMinus
				return s.token
			}
			if tar := s.peekEqual(1, '='); tar >= 0 {
				s.pos = tar
				s.token = token.MinusEquals
				return s.token
			}
			s.pos += size
			s.token = token.Minus
			return s.token
		case '.':
			// ".5" is a numeric literal; "..." is an ellipsis; else a dot.
			if s.peekCheck(1, IsDigit) >= 0 {
				s.token, s.tokenValue = s.scanNumber()
				return s.token
			}
			if s.peekEqual(1, '.') >= 0 {
				if tar := s.peekEqual(2, '.'); tar >= 0 {
					s.pos = tar
					s.token = token.DotDotDot
					return s.token
				}
			}
			s.pos += size
			s.token = token.Dot
			return s.token
		case '/':
			// In the document context, comments are not allowed to exist
			if s.inDocument {
				s.pos += size
				s.token = token.Slash
				return s.token
			}

			// Single-line comment
			if tar := s.peekEqual(1, '/'); tar >= 0 {
				s.pos = tar

				for s.pos < s.end {
					ch, size = utf8.DecodeRune(s.text[s.pos:])
					if IsLineBreak(ch) {
						break
					}
					s.pos += size
				}

				if s.skipTrivia {
					continue
				}
				s.token = token.SingleLineCommentTrivia
				return s.token
			}
			// Multi-line comment
			if tar := s.peekEqual(1, '*'); tar >= 0 {
				s.pos = tar

				// "/**" not immediately closed as "/**/" marks a
				// documentation comment.
				if s.peekEqual(0, '*') >= 0 && s.peekEqual(1, '/') == -1 {
					s.tokenFlags |= PrecedingDocumentComment
				}

				var commentClosed = false
				for s.pos < s.end {
					ch, size = utf8.DecodeRune(s.text[s.pos:])
					if ch == '*' {
						if tar := s.peekEqual(1, '/'); tar >= 0 {
							s.pos = tar
							commentClosed = true
							break
						}
					}
					if IsLineBreak(ch) {
						s.tokenFlags |= PrecedingLineBreak
					}
					s.pos += size
				}
				if !commentClosed {
					s.error(diagnostic.M_Asterisk_Slash_expected)
				}
				if s.skipTrivia {
					continue
				}
				// Fix: a *closed* comment previously fell through to the
				// "/=" handling below and mis-tokenized what followed.
				if !commentClosed {
					s.tokenFlags |= Unterminated
				}
				s.token = token.MultiLineCommentTrivia
				return s.token
			}

			if tar := s.peekEqual(1, '='); tar >= 0 {
				// Fix: was `s.pos += tar`, which jumped far past the token.
				s.pos = tar
				s.token = token.SlashEquals
				return s.token
			}
			s.pos += size
			s.token = token.Slash
			return s.token
		case '0':
			// Hexadecimal literal: "0x"/"0X" followed by hex digits.
			if s.pos+2 < s.end {
				if tar := s.peekCheck(1, func(ch rune) bool { return ch == 'x' || ch == 'X' }); tar >= 0 {
					s.pos = tar
					s.tokenValue = s.scanMinimumNumberOfHexDigits(1, false)
					if len(s.tokenValue) == 0 {
						s.error(diagnostic.M_Hexadecimal_digit_expected)
						s.tokenValue = "0"
					}
					s.tokenValue = "0x" + s.tokenValue
					s.tokenFlags |= HexSpecifier
					s.token = s.checkNumberSuffix()
					return s.token
				}
			}
			// Try to parse as an octal
			if s.pos+1 < s.end {
				if s.peekCheck(1, IsOctalDigit) >= 0 {
					s.tokenValue = s.scanOctalDigits()
					s.token = s.checkNumberSuffix()
					return s.token
				}
			}
			// This fall-through is a deviation from the EcmaScript grammar. The grammar says that a leading zero
			// can only be followed by an octal digit, a dot, or the end of the number literal. However, we are being
			// permissive and allowing decimal digits of the form 08* and 09* (which many browsers also do).
			fallthrough
		case '1', '2', '3', '4', '5', '6', '7', '8', '9':
			s.token, s.tokenValue = s.scanNumber()
			return s.token
		case ':':
			s.pos += size
			s.token = token.Colon
			return s.token
		case ';':
			s.pos += size
			s.token = token.Semicolon
			return s.token
		case '<':
			if tar := s.peekEqual(1, '<'); tar >= 0 {
				if tar2 := s.peekEqual(2, '='); tar2 >= 0 {
					s.pos = tar2
					s.token = token.LessThanLessThanEquals
					return s.token
				}
				s.pos = tar
				s.token = token.LessThanLessThan
				return s.token
			}
			if tar := s.peekEqual(1, '='); tar >= 0 {
				s.pos = tar
				s.token = token.LessThanEquals
				return s.token
			}
			s.pos += size
			s.token = token.LessThan
			return s.token
		case '=':
			if tar := s.peekEqual(1, '='); tar >= 0 {
				s.pos = tar
				s.token = token.EqualsEquals
				return s.token
			}
			if tar := s.peekEqual(1, '>'); tar >= 0 {
				s.pos = tar
				s.token = token.EqualsGreaterThan
				return s.token
			}
			s.pos += size
			s.token = token.Equals
			return s.token
		case '>':
			// Compound '>' operators are produced by ReScanGreaterToken on
			// demand, so nested generic closers stay single tokens here.
			s.pos += size
			s.token = token.GreaterThan
			return s.token
		case '?':
			s.pos += size
			s.token = token.Question
			return s.token
		case '[':
			s.pos += size
			s.token = token.OpenBracket
			return s.token
		case ']':
			s.pos += size
			s.token = token.CloseBracket
			return s.token
		case '^':
			if tar := s.peekEqual(1, '='); tar >= 0 {
				// Fix: was `s.pos += size`, which consumed only the '^' and
				// left the '=' to be scanned as a spurious Equals token.
				s.pos = tar
				s.token = token.CaretEquals
				return s.token
			}
			s.pos += size
			s.token = token.Caret
			return s.token
		case '{':
			s.pos += size
			s.token = token.OpenBrace
			return s.token
		case '|':
			if tar := s.peekEqual(1, '|'); tar >= 0 {
				s.pos = tar
				s.token = token.BarBar
				return s.token
			}
			if tar := s.peekEqual(1, '='); tar >= 0 {
				s.pos = tar
				s.token = token.BarEquals
				return s.token
			}
			s.pos += size
			s.token = token.Bar
			return s.token
		case '}':
			s.pos += size
			s.token = token.CloseBrace
			return s.token
		case '~':
			s.pos += size
			s.token = token.Tilde
			return s.token
		case '@':
			s.pos += size
			s.token = token.At
			return s.token
		case '\\':
			// An identifier may begin with a '\uXXXX' escape.
			if esc := s.peekUnicodeEscape(); esc >= 0 && s.isIdentifierPart(esc) {
				// \uXXXX length is 6
				s.pos += 6
				s.tokenValue = string(esc) + s.scanIdentifierParts()
				s.token = s.getIdentifierToken()
				return s.token
			}
			s.error(diagnostic.M_Invalid_character)
			s.pos += size
			s.token = token.Unknown
			return s.token
		default:
			if s.isIdentifierStart(ch) {
				s.pos += size
				for tar := s.peekCheck(0, s.isIdentifierPart); tar >= 0; tar = s.peekCheck(0, s.isIdentifierPart) {
					s.pos = tar
				}
				s.tokenValue = string(s.text[s.tokenPos:s.pos])
				// A '\uXXXX' escape may continue the identifier.
				if s.peekEqual(0, '\\') >= 0 {
					s.tokenValue += s.scanIdentifierParts()
				}
				s.token = s.getIdentifierToken()
				return s.token
			} else if IsWhiteSpace(ch) {
				s.pos += size
				continue
			} else if IsLineBreak(ch) {
				s.tokenFlags |= PrecedingLineBreak
				s.pos += size
				continue
			}
			s.error(diagnostic.M_Invalid_character)
			s.pos += size
			s.token = token.Unknown
			return s.token
		}
	}
}

// ReScanGreaterToken re-examines the text following a just-scanned '>' and
// combines it into >>, >>>, >=, >>= or >>>= where possible. Scan() always
// emits single '>' tokens; callers invoke this when a shift/relational
// operator is actually expected. If the current token is not '>', it is
// returned unchanged.
func (s *Scanner) ReScanGreaterToken() token.Token {
	if s.token == token.GreaterThan {
		// s.pos sits just past the first '>'; peek offsets are relative to it.
		if tar := s.peekEqual(0, '>'); tar >= 0 {
			// At least ">>". Check for a third '>' ( >>> / >>>= ).
			if tar := s.peekEqual(1, '>'); tar >= 0 {
				if tar := s.peekEqual(2, '='); tar >= 0 {
					s.pos = tar
					s.token = token.GreaterThanGreaterThanGreaterThanEquals
					return s.token
				}
				// tar here is the position after the third '>' (outer binding).
				s.pos = tar
				s.token = token.GreaterThanGreaterThanGreaterThan
				return s.token
			}
			// ">>=" : '=' directly after the second '>'.
			if tar := s.peekEqual(1, '='); tar >= 0 {
				s.pos = tar
				s.token = token.GreaterThanGreaterThanEquals
				return s.token
			}
			s.pos = tar
			s.token = token.GreaterThanGreaterThan
			return s.token
		}
		if tar := s.peekEqual(0, '='); tar >= 0 {
			s.pos = tar
			s.token = token.GreaterThanEquals
			return s.token
		}
	}
	return s.token
}

// SetText replaces the scanned text and positions the scanner at start.
// A start of -1 means "begin at offset 0"; a length of -1 means "scan
// through the end of newText".
func (s *Scanner) SetText(newText []byte, start int, length int) {
	if start == -1 {
		start = 0
	}

	if length == -1 {
		s.end = len(newText)
	} else {
		// Note: the range end is computed from the (normalized) start.
		s.end = start + length
	}

	s.text = newText
	s.SetTextPos(start)
}

// SetTextPos repositions the scanner at textPos and resets all per-token
// state. Panics on a negative position (programmer error).
func (s *Scanner) SetTextPos(textPos int) {
	if textPos < 0 {
		panic("textPos < 0")
	}
	s.pos, s.startPos, s.tokenPos = textPos, textPos, textPos
	s.token = token.Unknown
	s.tokenFlags = None
}

// SetScriptTarget updates the language version used for identifier
// classification.
func (s *Scanner) SetScriptTarget(scriptTarget ast.ScriptTarget) {
	s.languageVersion = scriptTarget
}

// SetInDocument toggles documentation-comment scanning mode.
func (s *Scanner) SetInDocument(in bool){
	s.inDocument = in
}

// GetStartPos returns the start of the trivia preceding the current token.
func (s *Scanner) GetStartPos() int {
	return s.startPos
}

// GetToken returns the kind of the current token.
func (s *Scanner) GetToken() token.Token {
	return s.token
}

// GetTextPos returns the current scan position (end of the current token).
func (s *Scanner) GetTextPos() int {
	return s.pos
}

// GetTokenPos returns the start position of the current token's text.
func (s *Scanner) GetTokenPos() int {
	return s.tokenPos
}

// GetTokenText returns the raw source text of the current token.
func (s *Scanner) GetTokenText() string {
	return string(s.text[s.tokenPos:s.pos])
}

// GetTokenValue returns the cooked value of the current token (e.g. string
// contents with escapes applied).
func (s *Scanner) GetTokenValue() string {
	return s.tokenValue
}

// HasPrecedingLineBreak reports whether a line break was seen in the trivia
// before the current token.
func (s *Scanner) HasPrecedingLineBreak() bool {
	return s.tokenFlags&PrecedingLineBreak != 0
}

// HasPrecedingDocumentComment reports whether a /** doc comment preceded the
// current token.
func (s *Scanner) HasPrecedingDocumentComment() bool {
	return s.tokenFlags&PrecedingDocumentComment != 0
}

// isIdentifier reports whether the current token is a plain identifier.
func (s *Scanner) isIdentifier() bool {
	return s.token == token.Identifier
}

// IsUnterminated reports whether the current token (string or comment) ran
// off the end of its text without a closing delimiter.
func (s *Scanner) IsUnterminated() bool {
	return s.tokenFlags&Unterminated != 0
}

// HasExtendedUnicodeEscape reports whether the current token contained an
// extended Unicode escape.
func (s *Scanner) HasExtendedUnicodeEscape() bool {
	return s.tokenFlags&ExtendedUnicodeEscape != 0
}

// ScanDocumentToken reads the next token using the reduced documentation
// grammar (hash headings, backtick fences, list heads, etc.).
//
// Fixes relative to the previous version: the backslash case's EOF guard was
// always true (its fallback was dead and a trailing backslash produced
// U+FFFD), and the digit-run loop stalled when the run started at position 0.
func (s *Scanner) ScanDocumentToken() token.Token {
	s.startPos = s.pos
	s.tokenPos = s.pos

	if s.pos >= s.end {
		s.token = token.EndOfFile
		return s.token
	}

	ch, size := utf8.DecodeRune(s.text[s.pos:])
	switch ch {
	case '\t', '\v', '\f', ' ':
		s.pos += size
		s.token = token.WhitespaceTrivia
		return s.token
	case '@':
		s.pos += size
		s.token = token.At
		return s.token
	case '`':
		// "```" opens/closes a code fence; otherwise a single backtick.
		if s.peekEqual(1, '`') >= 0 {
			if tar := s.peekEqual(2, '`'); tar >= 0 {
				s.pos = tar
				s.token = token.BacktickBacktickBacktick
				return s.token
			}
		}
		s.pos += size
		s.token = token.Backtick
		return s.token
	case '.':
		if s.peekEqual(1, '.') >= 0 {
			if tar := s.peekEqual(2, '.'); tar >= 0 {
				s.pos = tar
				s.token = token.DotDotDot
				return s.token
			}
		}
		s.pos += size
		s.token = token.Dot
		return s.token
	case '*':
		if tar := s.peekEqual(1, '*'); tar >= 0 {
			if tar3 := s.peekEqual(2, '*'); tar3 >= 0 {
				s.pos = tar3
				s.token = token.AsteriskAsteriskAsterisk
				return s.token
			}
			s.pos = tar
			s.token = token.AsteriskAsterisk
			return s.token
		}

		s.pos += size
		s.token = token.Asterisk
		return s.token
	case ':':
		s.pos += size
		s.token = token.Colon
		return s.token
	case '#':
		// Greedily match runs of '#' up to five (heading levels).
		if tar2 := s.peekEqual(1, '#'); tar2 >= 0 {
			if tar3 := s.peekEqual(2, '#'); tar3 >= 0 {
				if tar4 := s.peekEqual(3, '#'); tar4 >= 0 {
					if tar5 := s.peekEqual(4, '#'); tar5 >= 0 {
						s.pos = tar5
						s.token = token.HashHashHashHashHash
						return s.token
					}
					s.pos = tar4
					s.token = token.HashHashHashHash
					return s.token
				}
				s.pos = tar3
				s.token = token.HashHashHash
				return s.token
			}
			s.pos = tar2
			s.token = token.HashHash
			return s.token
		}
		s.pos += size
		s.token = token.Hash
		return s.token
	case '[':
		s.pos += size
		s.token = token.OpenBracket
		return s.token
	case ']':
		s.pos += size
		s.token = token.CloseBracket
		return s.token
	case '(':
		s.pos += size
		s.token = token.OpenParen
		return s.token
	case ')':
		s.pos += size
		s.token = token.CloseParen
		return s.token
	case '<':
		s.pos += size
		s.token = token.LessThan
		return s.token
	case '>':
		s.pos += size
		s.token = token.GreaterThan
		return s.token

	// If there is a next character, the backslash escapes it; a trailing
	// backslash at end of text stands for itself. This judgment is loose.
	case '\\':
		// Fix: the old guard was `s.pos < s.end`, which is always true here
		// (we just decoded at s.pos), so the fallback was unreachable and a
		// backslash at end of text decoded U+FFFD from the empty tail.
		if s.pos+size < s.end {
			s.pos += size
			next, nextSize := utf8.DecodeRune(s.text[s.pos:])
			s.tokenValue = string(next)
			s.pos += nextSize
			s.token = token.StringLiteral
			return s.token
		}

		s.pos += size
		s.tokenValue = "\\"
		s.token = token.StringLiteral
		return s.token
	case '\r', '\n':
		if ch == '\r' && s.peekEqual(1, '\n') >= 0 {
			// Consume the CRLF pair as one line break.
			s.pos += 2
		} else {
			s.pos += size
		}
		s.token = token.NewLineTrivia
		return s.token
	case '+':
		// "+ " begins an unordered list item; a bare '+' falls through to
		// the identifier/unknown handling below.
		if tar := s.peekEqual(1, ' '); tar >= 0 {
			s.pos = tar
			s.token = token.UnorderedListHead
			return s.token
		}
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		// Consume the digit run. The first digit is consumed explicitly so
		// this also makes progress at position 0 (the previous `tar > 0`
		// guard stalled there).
		s.pos += size
		for tar := s.peekCheck(0, IsDigit); tar >= 0; tar = s.peekCheck(0, IsDigit) {
			s.pos = tar
		}
		// "N. " begins an ordered list item.
		if s.peekEqual(0, '.') >= 0 {
			if tar := s.peekEqual(1, ' '); tar >= 0 {
				s.pos = tar
				s.token = token.OrderedListHead
				return s.token
			}
		}

		s.tokenValue = string(s.text[s.tokenPos:s.pos])
		s.token = token.IntLiteral
		return s.token
	}

	if s.isIdentifierStart(ch) {
		s.pos += size
		for tar := s.peekCheck(0, s.isIdentifierPart); tar >= 0; tar = s.peekCheck(0, s.isIdentifierPart) {
			s.pos = tar
		}
		s.tokenValue = string(s.text[s.tokenPos:s.pos])
		s.token = s.getIdentifierToken()
		return s.token
	}
	s.pos += size
	s.token = token.Unknown
	return s.token
}

// speculationHelper runs callback against the live scanner and restores the
// complete token state either unconditionally (lookahead) or when the
// callback produced a nil result (failed try-scan).
func (s *Scanner) speculationHelper(callback func() interface{}, isLookahead bool) interface{} {
	// Snapshot everything Scan() mutates. (Named savedToken etc. to avoid
	// shadowing the token package.)
	savedToken := s.token
	savedPos := s.pos
	savedTokenValue := s.tokenValue
	savedStartPos := s.startPos
	savedTokenPos := s.tokenPos
	savedTokenFlags := s.tokenFlags

	result := callback()

	// If our callback returned something 'falsy' or we're just looking ahead,
	// then unconditionally restore us to where we were.
	if isLookahead || isNil(result) {
		s.token = savedToken
		s.pos = savedPos
		s.tokenValue = savedTokenValue
		s.startPos = savedStartPos
		s.tokenPos = savedTokenPos
		s.tokenFlags = savedTokenFlags
	}
	return result
}

// ScanRange temporarily restricts the scanner to [start, start+length),
// runs callback, restores every piece of scanner state, and returns the
// callback's result.
func (s *Scanner) ScanRange(start int, length int, callback func () interface{} ) interface{} {
	// Snapshot all state that SetText/Scan may touch.
	savedEnd := s.end
	savedPos := s.pos
	savedStartPos := s.startPos
	savedTokenPos := s.tokenPos
	savedToken := s.token
	savedTokenFlags := s.tokenFlags
	savedTokenValue := s.tokenValue
	// const saveHasExtendedUnicodeEscape = hasExtendedUnicodeEscape;
	// const saveTokenIsUnterminated = tokenIsUnterminated;

	s.SetText(s.text, start, length)
	result := callback()

	s.end = savedEnd
	s.pos = savedPos
	s.startPos = savedStartPos
	s.tokenPos = savedTokenPos
	s.token = savedToken
	s.tokenFlags = savedTokenFlags
	s.tokenValue = savedTokenValue
	// s.hasExtendedUnicodeEscape = saveHasExtendedUnicodeEscape;
	// s.tokenIsUnterminated = saveTokenIsUnterminated;

	return result
}

// TryScan runs callback speculatively: scanner state is restored only when
// the callback yields a nil result (the speculation failed).
func (s *Scanner) TryScan(callback func () interface{}) interface{}{
	return s.speculationHelper(callback, false)
}

// LookHead runs callback and always restores scanner state afterwards.
// NOTE(review): the name is presumably a typo for "LookAhead"; renaming
// would break external callers, so it is kept as-is.
func (s *Scanner) LookHead(callback func () interface{}) interface{}{
	return s.speculationHelper(callback, true)
}

// isNil reports whether i is nil — either a bare nil interface, or an
// interface holding a nil value of a nilable kind (pointer, map, slice,
// channel, function, interface, unsafe pointer). The previous version only
// recognized pointers, so e.g. a nil slice returned from a speculation
// callback read as "non-nil" and skipped the state restore.
func isNil(i interface{}) bool {
	if i == nil {
		return true
	}
	v := reflect.ValueOf(i)
	switch v.Kind() {
	// IsNil is only legal on these kinds; calling it elsewhere panics.
	case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan,
		reflect.Func, reflect.Interface, reflect.UnsafePointer:
		return v.IsNil()
	}
	return false
}