package lexer


import "bytes"
import "fmt"
import "regexp"
import "strconv"
import "strings"


// Matches an opening long bracket: "[[", "[=[", "[==[", ...
var reOpeningLongBracket = regexp.MustCompile(`^\[=*\[`)

// Matches any newline sequence: \r\n, \n\r, \n or \r.
var reNewLine = regexp.MustCompile("\r\n|\n\r|\n|\r")

// Short string literal, single- or double-quoted.
// (?s) turns on single-line mode so that . also matches newlines.
// Inside the quotes a character is either an escape sequence
// (\\, \', \", an escaped newline, \z followed by whitespace) or any
// character except the closing quote and a raw newline ([^'\n] / [^"\n]).
var reShortStr = regexp.MustCompile(`(?s)(^'(\\\\|\\'|\\\n|\\z\s*|[^'\n])*')|(^"(\\\\|\\"|\\\n|\\z\s*|[^"\n])*")`)

// Decimal escape sequence \ddd (one to three digits).
// FIX: the quantifier was written `{1, 3}`; RE2 treats a repetition with
// a space as literal text, so the pattern never matched a real escape.
var reDecEscapeSeq = regexp.MustCompile(`^\\[0-9]{1,3}`)

// Hexadecimal escape sequence \xXX (exactly two hex digits).
var reHexEscapeSeq = regexp.MustCompile(`^\\x[0-9a-fA-F]{2}`)

// Unicode escape sequence \u{XXX} (one or more hex digits in braces).
// FIX: the pattern was `\\u\{[0-9a-fA-F]}+\}` — the `+` applied to a
// literal `}` instead of the digit class, so e.g. `\u{41}` never matched.
var reUnicodeEscapeSeq = regexp.MustCompile(`^\\u\{[0-9a-fA-F]+\}`)

// Numeric literal: hexadecimal (with optional fraction and binary
// exponent) or decimal (with optional fraction and exponent).
var reNumber = regexp.MustCompile(`^0[xX][0-9a-fA-F]*(\.[0-9a-fA-F]*)?([pP][+\-]?[0-9]+)?|^[0-9]*(\.[0-9]*)?([eE][+\-]?[0-9]+)?`)

// Identifier: a letter or underscore followed by letters, digits or underscores.
var reIdentifier = regexp.MustCompile(`^[_a-zA-Z][_\d\w]*`)


// Lexer turns Lua source text into a stream of tokens.
// The chunk string is consumed from the front as tokens are extracted;
// the nextToken* fields implement a one-token lookahead cache.
type Lexer struct {

	chunk		string 		// remaining source code, consumed from the front

	chunkName	string 		// source file name, used only in error messages

	line		int			// current line number, used when reporting lexical errors

	nextToken 	string 		// cached lookahead token text

	nextTokenKind	int 	// cached lookahead token kind

	nextTokenLine 	int 	// cached lookahead token line; > 0 means the cache is valid

}


// NewLexer creates a Lexer for the given source code (chunk) and source
// file name (chunkName). The line counter starts at 1; the lookahead
// cache starts empty (zero values).
func NewLexer(chunk, chunkName string) *Lexer {
	return &Lexer{
		chunk:     chunk,
		chunkName: chunkName,
		line:      1,
	}
}


// Line returns the current line number.
func (self *Lexer) Line() int {

	return self.line
}


// LookAhead returns the kind of the next token without consuming it.
// A positive nextTokenLine marks the cache as already filled, in which
// case the cached kind is returned directly; otherwise the next token
// is extracted via NextToken() and stored in the cache.
func (self *Lexer) LookAhead() int {
	if self.nextTokenLine > 0 {
		return self.nextTokenKind
	}

	// NextToken() advances self.line; save and restore it so that
	// peeking does not change the externally visible current line.
	savedLine := self.line
	line, kind, token := self.NextToken()
	self.line = savedLine

	self.nextTokenLine, self.nextTokenKind, self.nextToken = line, kind, token
	return kind
}


// NextIdentifier extracts the next token and requires it to be an
// identifier, returning its line number and text.
func (self *Lexer) NextIdentifier() (line int, token string) {

	return self.NextTokenOfKind(TOKEN_IDENTIFIER)
}


// NextTokenOfKind extracts the next token and checks that it is of the
// expected kind; a mismatch is reported as a syntax error (which panics
// via error()).
func (self *Lexer) NextTokenOfKind(kind int) (line int, token string) {
	line, actualKind, token := self.NextToken()
	if actualKind != kind {
		self.error("syntax error near '%s'", token)
	}
	return line, token
}


// NextToken skips whitespace and comments, then returns the next token
// (its line number, kind and text).
// When the source has been fully consumed it returns the special EOF token.
func (self *Lexer) NextToken() (line, kind int, token string) {

	// LookAhead() may have filled the one-token cache; a positive
	// nextTokenLine marks it valid — return it and invalidate the cache.
	if self.nextTokenLine > 0 {

		line = self.nextTokenLine
		kind = self.nextTokenKind
		token = self.nextToken

		self.line = self.nextTokenLine

		self.nextTokenLine = 0

		return
	}

	self.skipWhiteSpaces()

	if len(self.chunk) == 0 {
		return self.line, TOKEN_EOF, "EOF"
	}

	// Separators, operators and string literals; multi-character
	// operators are disambiguated by testing the longest prefix first.
	switch self.chunk[0] {
		case ';': self.next(1); return self.line, TOKEN_SEP_SEMI, ";"

		case ',': self.next(1); return self.line, TOKEN_SEP_COMMA, ","

		case '(': self.next(1); return self.line, TOKEN_SEP_LPAREN, "("

		case ')': self.next(1); return self.line, TOKEN_SEP_RPAREN, ")"

		case ']': self.next(1); return self.line, TOKEN_SEP_RBRACK, "]"

		case '{': self.next(1); return self.line, TOKEN_SEP_LCURLY, "{"

		case '}': self.next(1); return self.line, TOKEN_SEP_RCURLY, "}"

		case '+': self.next(1); return self.line, TOKEN_OP_ADD, "+"

		case '-': self.next(1); return self.line, TOKEN_OP_MINUS, "-"

		case '*': self.next(1); return self.line, TOKEN_OP_MUL, "*"

		case '^': self.next(1); return self.line, TOKEN_OP_POW, "^"

		case '%': self.next(1); return self.line, TOKEN_OP_MOD, "%"

		case '&': self.next(1); return self.line, TOKEN_OP_BAND, "&"

		case '|': self.next(1); return self.line, TOKEN_OP_BOR, "|"

		case '#': self.next(1); return self.line, TOKEN_OP_LEN, "#"

		case ':': 
			if self.test("::") {
				self.next(2); return self.line, TOKEN_SEP_LABEL, "::"
			} else {
				self.next(1); return self.line, TOKEN_SEP_COLON, ":"
			}

		case '/':
			if self.test("//") {
				self.next(2); return self.line, TOKEN_OP_IDIV, "//"
			} else {
				self.next(1); return self.line, TOKEN_OP_DIV, "/"
			}

		case '~':
			if self.test("~=") {
				self.next(2); return self.line, TOKEN_OP_NE, "~="
			} else {
				self.next(1); return self.line, TOKEN_OP_WAVE, "~"
			}

		case '=':
			if self.test("==") {
				self.next(2); return self.line, TOKEN_OP_EQ, "=="
			} else {
				self.next(1); return self.line, TOKEN_OP_ASSIGN, "="
			}

		case '<':
			if self.test("<<") {
				self.next(2); return self.line, TOKEN_OP_SHL, "<<"
			} else if self.test("<=") {
				self.next(2); return self.line, TOKEN_OP_LE, "<="
			} else {
				self.next(1); return self.line, TOKEN_OP_LT, "<"
			}

		case '>':
			if self.test(">>") {
				self.next(2); return self.line, TOKEN_OP_SHR, ">>"
			} else if self.test(">=") {
				self.next(2); return self.line, TOKEN_OP_GE, ">="
			} else {
				self.next(1); return self.line, TOKEN_OP_GT, ">"
			}

		case '.':
			if self.test("...") {
				self.next(3); return self.line, TOKEN_VARARG, "..."
			} else if self.test("..") {
				self.next(2); return self.line, TOKEN_OP_CONCAT, ".."
			} else if len(self.chunk) == 1 || !isDigit(self.chunk[1]) {
				self.next(1); return self.line, TOKEN_SEP_DOT, "."
			}
			// a '.' followed by a digit deliberately has no return here:
			// it falls out of the switch and is scanned as a number below

		case '[':
			if self.test("[[") || self.test("[=") {
				return self.line, TOKEN_STRING, self.scanLognString()
			} else {
				self.next(1); return self.line, TOKEN_SEP_LBRACK, "["
			}

		case '\'', '"':
			return self.line, TOKEN_STRING, self.scanShortString()

	}

	c := self.chunk[0]

	// numeric literal (also reached via the '.'-digit fallthrough above)
	if c == '.' || isDigit(c) {
		token := self.scanNumber()

		return self.line, TOKEN_NUMBER, token
	}

	// identifier or keyword: keywords share the identifier syntax and
	// are distinguished by a lookup in the keywords map
	if c == '_' || isLetter(c) {

		token := self.scanIdentifier()

		if kind, found := keywords[token]; found {
			return self.line, kind, token 	// keyword
		} else {
			return self.line, TOKEN_IDENTIFIER, token
		}
	}

	self.error("unexpected symbol near %q", c)

	return

}


// skipWhiteSpaces skips whitespace characters (keeping the line counter
// up to date) and also skips comments along the way.
func (self *Lexer) skipWhiteSpaces() {
	for len(self.chunk) > 0 {
		switch {
		case self.test("--"):
			self.skipComment()
		case self.test("\r\n"), self.test("\n\r"):
			// two-character newline sequences count as a single line break
			self.next(2)
			self.line++
		case isNewLine(self.chunk[0]):
			self.next(1)
			self.line++
		case isWhiteSpace(self.chunk[0]):
			self.next(1)
		default:
			return
		}
	}
}


// test reports whether the remaining source code starts with the given string.
func (self *Lexer) test(s string) bool {

	return strings.HasPrefix(self.chunk, s)

}


// next skips (consumes) n characters of the remaining source code.
func (self *Lexer) next(n int) {

	self.chunk = self.chunk[n:]

}


// isWhiteSpace reports whether c is a whitespace character
// (tab, newline, vertical tab, form feed, carriage return or space).
func isWhiteSpace(c byte) bool {
	return c == '\t' || c == '\n' || c == '\v' ||
		c == '\f' || c == '\r' || c == ' '
}


// isNewLine reports whether c is a carriage return or line feed.
func isNewLine(c byte) bool {

	return c == '\r' || c == '\n'

}


// skipComment skips one comment. Long comments ("--[[ ... ]]", possibly
// with level markers "--[==[ ... ]==]") are consumed via scanLognString
// so embedded newlines update the line counter; short comments run to
// the end of the current line.
func (self *Lexer) skipComment() {
	self.next(2) // consume the leading "--"

	// long comment?
	if self.test("[") && reOpeningLongBracket.FindString(self.chunk) != "" {
		self.scanLognString()
		return
	}

	// short comment: skip everything up to (not including) the newline
	for len(self.chunk) > 0 && !isNewLine(self.chunk[0]) {
		self.next(1)
	}
}


// scanLognString scans a long string literal ("[[ ... ]]" with optional
// "=" level markers).
// It locates the opening and closing long brackets — a missing bracket
// is a syntax error reported via error() — then takes the text between
// them, normalizes every newline sequence to '\n' (counting the lines),
// and finally drops a single leading newline if one is present.
func (self *Lexer) scanLognString() string {
	opening := reOpeningLongBracket.FindString(self.chunk)
	if opening == "" {
		self.error("invalid long string delimiter near '%s'", self.chunk[0:2])
	}

	// the closing bracket is the opening one with '[' turned into ']'
	closing := strings.Replace(opening, "[", "]", -1)
	closingIdx := strings.Index(self.chunk, closing)
	if closingIdx < 0 {
		self.error("unfinished long string or comment")
	}

	str := self.chunk[len(opening):closingIdx]
	self.next(closingIdx + len(closing))

	str = reNewLine.ReplaceAllString(str, "\n")
	self.line += strings.Count(str, "\n")

	// Lua drops a newline immediately following the opening bracket
	if len(str) > 0 && str[0] == '\n' {
		str = str[1:]
	}

	return str
}


// scanShortString scans a single- or double-quoted string literal,
// strips the surrounding quotes, advances the line counter past any
// escaped newlines inside the literal, and decodes escape sequences.
func (self *Lexer) scanShortString() string {
	str := reShortStr.FindString(self.chunk)
	if str == "" {
		self.error("unfinished string")
		return ""
	}

	self.next(len(str))
	str = str[1 : len(str)-1] // drop the quotes
	if strings.Contains(str, `\`) {
		// newlines can only appear escaped, so count them here
		self.line += len(reNewLine.FindAllString(str, -1))
		str = self.escape(str)
	}
	return str
}


// escape decodes the escape sequences inside a short string literal
// (quotes already stripped) and returns the resulting string.
//
// Supported escapes: \a \b \f \n \r \t \v, escaped quote/backslash,
// an escaped real newline, \ddd (decimal byte value, <= 255),
// \xXX (hex byte), \u{XXX} (Unicode code point, emitted as UTF-8,
// <= 0x10FFFF) and \z (skips any whitespace that follows it).
// Any other sequence is reported as an error.
func (self *Lexer) escape(str string) string {

	var buf bytes.Buffer

	for len(str) > 0 {
		// plain character: copy it through unchanged
		if str[0] != '\\' {
			buf.WriteByte(str[0])
			str = str[1:]
			continue
		}

		// a lone trailing backslash cannot start an escape sequence
		if len(str) == 1 {
			self.error("unfinished string")
		}

		switch str[1] {
		case 'a':
			buf.WriteByte('\a')
			str = str[2:]
			continue
		case 'b':
			buf.WriteByte('\b')
			str = str[2:]
			continue
		case 'f':
			buf.WriteByte('\f')
			str = str[2:]
			continue
		case 'n', '\n':
			// \n and an escaped real newline both yield a newline
			buf.WriteByte('\n')
			str = str[2:]
			continue
		case 'r':
			buf.WriteByte('\r')
			str = str[2:]
			continue
		case 't':
			// FIX: the tab escape \t was missing entirely, which made
			// the loop below spin forever on input containing "\t"
			buf.WriteByte('\t')
			str = str[2:]
			continue
		case 'v':
			buf.WriteByte('\v')
			str = str[2:]
			continue
		case '"':
			buf.WriteByte('"')
			str = str[2:]
			continue
		case '\'':
			buf.WriteByte('\'')
			str = str[2:]
			continue
		case '\\':
			buf.WriteByte('\\')
			str = str[2:]
			continue
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': // \ddd
			if found := reDecEscapeSeq.FindString(str); found != "" {
				d, _ := strconv.ParseInt(found[1:], 10, 32)
				if d <= 0xFF {
					buf.WriteByte(byte(d))
					str = str[len(found):]
					continue
				}
				// FIX: error message typo ("excape" -> "escape")
				self.error("decimal escape too large near '%s'", found)
			}
		case 'x': // \xXX
			// FIX: the condition used to be `str != ""` (always true
			// here), so a non-matching sequence panicked on found[2:]
			if found := reHexEscapeSeq.FindString(str); found != "" {
				d, _ := strconv.ParseInt(found[2:], 16, 32)
				buf.WriteByte(byte(d))
				str = str[len(found):]
				continue
			}
		case 'u': // \u{XXX}
			// Extract the escape with a regexp, parse the hex digits
			// and encode the code point as UTF-8 via WriteRune; code
			// points above 0x10FFFF are reported as an error.
			// FIX: the condition used to test `str != ""` instead of
			// `found != ""`, panicking on found[3:...] when no match.
			if found := reUnicodeEscapeSeq.FindString(str); found != "" {
				d, err := strconv.ParseInt(found[3:len(found)-1], 16, 32)
				if err == nil && d <= 0x10FFFF {
					buf.WriteRune(rune(d)) // unicode code point
					str = str[len(found):]
					continue
				}
				self.error("UTF-8 value too large near '%s'", found)
			}
		case 'z':
			// skip the \z escape itself, then any following whitespace
			str = str[2:]
			for len(str) > 0 && isWhiteSpace(str[0]) {
				str = str[1:]
			}
			continue
		}

		// FIX: unrecognized escapes (and failed \ddd/\xXX/\u{} matches)
		// previously fell out of the switch with str unchanged, looping
		// forever; report them as errors instead
		self.error("invalid escape sequence near '\\%c'", str[1])
	}

	return buf.String()
}


// isDigit reports whether c is a decimal digit.
func isDigit(c byte) bool {
	return '0' <= c && c <= '9'
}


// scanNumber scans a numeric literal from the front of the chunk and
// returns its text (the caller guarantees the chunk starts with one).
func (self *Lexer) scanNumber() string {

	return self.scan(reNumber)
}


// scan matches re against the front of the remaining chunk, consumes
// the match and returns it. Callers only invoke it after confirming a
// match is possible, so an empty match indicates a programming error.
func (self *Lexer) scan(re *regexp.Regexp) string {
	token := re.FindString(self.chunk)
	if token == "" {
		panic("unreachable!")
	}
	self.next(len(token))
	return token
}


// isLetter reports whether c is an ASCII letter (a-z or A-Z).
func isLetter(c byte) bool {
	return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
}


// scanIdentifier scans an identifier (or keyword text) from the front
// of the chunk and returns it.
func (self *Lexer) scanIdentifier() string {

	return self.scan(reIdentifier)
}


// error builds an error message "chunkName:line: message" from the
// source file name, the current line number and the given format and
// arguments, then panics with it.
func (self *Lexer) error(f string, a ...interface{}) {
	msg := fmt.Sprintf(f, a...)
	panic(fmt.Sprintf("%s:%d: %s", self.chunkName, self.line, msg))
}