package tokenizers

import (
	"fmt"
	"gitee.com/tiger000/taishan_web/utils/conv"

	"strconv"
)

// Tokenizer
/**
 * Lexical analyzer.
 * The tokenizer's interface behaves like a stream: lexing is performed
 * lazily, on demand. It supports the following operations:
 * Next():  returns the current token and advances to the next one.
 * Peek():  returns the current token without advancing the position.
 * Peek2(): returns the token after the current one without advancing.
 */
type Tokenizer struct {
	stream     CharStream // source of characters to be tokenized
	nextToken  *Token     // one-token look-ahead buffer, filled by Peek()
	nextToken2 *Token     // two-token look-ahead buffer, filled by Peek2()
}

// NewTokenizer builds a Tokenizer that lexes the characters of stream.
// The look-ahead buffers start out empty and are populated on demand.
func NewTokenizer(stream CharStream) *Tokenizer {
	t := new(Tokenizer)
	t.stream = stream
	return t
}
// Next returns the current token and advances to the next one.
//
// If Peek/Peek2 already buffered tokens, the first buffered token is
// consumed and the look-ahead buffer shifts forward by one slot;
// otherwise a fresh token is scanned directly from the stream.
func (that *Tokenizer) Next() Token {
	if that.nextToken != nil {
		token := *that.nextToken
		// Shift the two-token look-ahead window forward.
		that.nextToken = that.nextToken2
		that.nextToken2 = nil
		return token
	}
	// Nothing buffered: lex one token on demand.
	return that.getToken()
}

// Peek returns the current token without advancing the position.
// The token is lexed once and cached in the look-ahead buffer.
func (that *Tokenizer) Peek() Token {
	if that.nextToken != nil {
		return *that.nextToken
	}
	tok := that.getToken()
	that.nextToken = &tok
	return tok
}

// Peek2 returns the token after the current one without advancing the
// position. Both look-ahead slots are filled lazily and cached.
func (that *Tokenizer) Peek2() Token {
	if that.nextToken2 != nil {
		return *that.nextToken2
	}
	// The first slot must be filled before the second one.
	if that.nextToken == nil {
		tok := that.getToken()
		that.nextToken = &tok
	}
	tok2 := that.getToken()
	that.nextToken2 = &tok2
	return tok2
}

// getToken lexes and returns the next token from the stream.
//
// Whitespace, comments, and unrecognized characters are skipped inside a
// loop (the original recursed into itself after each comment, which could
// grow the stack on long runs of comments). Returns an EOF token when the
// stream is exhausted.
func (that *Tokenizer) getToken() Token {
	for {
		// Skip whitespace before each scan attempt.
		that.skipWhiteSpaces()
		// End of input?
		if that.stream.Eof() {
			return Token{Kind: EOF, Text: ""}
		}

		ch := that.stream.Peek()
		switch {
		case ch == '$':
			that.stream.Next()
			return Token{Kind: Start, Text: "$"}
		case that.isLetter(ch) || that.isDigit(ch) || ch == '_':
			// Identifier or keyword. Note: a leading digit is accepted
			// here and lexed as an identifier.
			return that.parseIdentifier()
		case ch == '"':
			// String literal.
			return that.parseStringLiteral()
		case that.isSeparator(ch):
			// Separator.
			that.stream.Next()
			return Token{Kind: Separator, Text: string(ch)}
		case ch == '/':
			// Comment or division operator.
			that.stream.Next()
			ch1 := that.stream.Peek()
			if ch1 == '*' {
				that.skipMultipleLineComments()
				continue // re-scan after the comment
			} else if ch1 == '/' {
				that.skipSingleLineComment()
				continue // re-scan after the comment
			} else if ch1 == '=' {
				that.stream.Next()
				return Token{Kind: Operator, Text: "/="}
			}
			return Token{Kind: Operator, Text: "/"}
		case ch == '+':
			that.stream.Next()
			ch1 := that.stream.Peek()
			if ch1 == '+' {
				that.stream.Next()
				return Token{Kind: Operator, Text: "++"}
			} else if ch1 == '=' {
				that.stream.Next()
				return Token{Kind: Operator, Text: "+="}
			}
			return Token{Kind: Operator, Text: "+"}
		case ch == '-':
			that.stream.Next()
			ch1 := that.stream.Peek()
			if ch1 == '-' {
				that.stream.Next()
				return Token{Kind: Operator, Text: "--"}
			} else if ch1 == '=' {
				that.stream.Next()
				return Token{Kind: Operator, Text: "-="}
			}
			return Token{Kind: Operator, Text: "-"}
		case ch == '*':
			that.stream.Next()
			if that.stream.Peek() == '=' {
				that.stream.Next()
				return Token{Kind: Operator, Text: "*="}
			}
			return Token{Kind: Operator, Text: "*"}
		case that.stream.IsNil(ch):
			return Token{Kind: EOF, Text: ""}
		default:
			// Temporarily discard characters we cannot recognize.
			fmt.Println("Unrecognized pattern meeting ': ", string(ch), "', at", that.stream.line, " col: ", that.stream.col)
			that.stream.Next()
			continue
		}
	}
}

/**
 * Parse an identifier, and pick out keywords from among identifiers.
 */
func (that *Tokenizer) parseIdentifier() Token {
	token := Token{Kind: Identifier, Text: ""}

	// The first character needs no check: the caller already validated it.
	byteText := make([]byte, 0)
	byteText = append(byteText, that.stream.Next())

	// Consume the following identifier characters.
	// Check Eof() BEFORE Peek() so we never read past the end of the
	// stream (the original checked Peek first, relying on Peek returning
	// a non-identifier byte at EOF).
	for !that.stream.Eof() && that.isLetterDigitOrUnderScore(that.stream.Peek()) {
		byteText = append(byteText, that.stream.Next())
	}

	// Recognize keywords.
	token.Text = conv.Bytes2String(byteText)
	if that.isKeyword(token.Text) {
		token.Kind = Keyword
	}

	return token
}

/**
 * Parse a string literal.
 * Only double quotes are supported for now, and escapes are not supported.
 * Panics when the closing quote is missing before EOF.
 */
func (that *Tokenizer) parseStringLiteral() Token {
	// The opening quote needs no check: the caller already validated it.
	that.stream.Next()

	byteText := make([]byte, 0)
	for !that.stream.Eof() && that.stream.Peek() != '"' {
		byteText = append(byteText, that.stream.Next())
	}

	// Guard with Eof() before calling Peek(), then consume the closing
	// quote. On a missing quote, panic once with the position instead of
	// printing and then panicking with the same duplicated message.
	if !that.stream.Eof() && that.stream.Peek() == '"' {
		that.stream.Next()
	} else {
		panic("Expecting an \" at line: " + strconv.Itoa(that.stream.line) + " col: " + strconv.Itoa(that.stream.col))
	}

	return Token{
		Kind: StringLiteral,
		Text: conv.Bytes2String(byteText),
	}
}

/**
 * Skip a single-line comment.
 */
func (that *Tokenizer) skipSingleLineComment() {
	// Consume the second '/'; the first one was consumed by the caller.
	that.stream.Next()

	// Discard everything up to (but not including) the newline or EOF.
	for !that.stream.Eof() && that.stream.Peek() != '\n' {
		that.stream.Next()
	}
}

/**
 * Skip a multi-line comment.
 * Panics if the stream ends before a matching terminator is found.
 */
func (that *Tokenizer) skipMultipleLineComments() {
	// Consume the '*'; the '/' was already consumed by the caller.
	that.stream.Next()

	// Slide a two-byte window over the stream until it holds "*/".
	var prev, cur byte
	for !that.stream.Eof() {
		prev, cur = cur, that.stream.Next()
		if prev == '*' && cur == '/' {
			return
		}
	}

	// No terminator was found before EOF: report the error.
	panic("Failed to find matching */ for multiple line comments at ': " + strconv.Itoa(that.stream.line) + " col: " + strconv.Itoa(that.stream.col))
}

/*
*
Skip whitespace characters.
*/
func (that *Tokenizer) skipWhiteSpaces() {
	// Fold the EOF guard and the whitespace test into one loop condition.
	for !that.stream.Eof() && that.isWhiteSpace(that.stream.Peek()) {
		that.stream.Next()
	}
}

// isKeyword reports whether the given identifier text is a reserved keyword.
// "function" is the only keyword recognized so far.
func (that *Tokenizer) isKeyword(keyword string) bool {
	return keyword == "function"
}
// isLetterDigitOrUnderScore reports whether ch may continue an identifier:
// an ASCII letter, an ASCII digit, or an underscore.
func (that *Tokenizer) isLetterDigitOrUnderScore(ch byte) bool {
	// Compose from the existing single-purpose predicates.
	return that.isLetter(ch) || that.isDigit(ch) || ch == '_'
}

// isLetter reports whether ch is an ASCII letter (A-Z or a-z).
func (that *Tokenizer) isLetter(ch byte) bool {
	// Setting bit 0x20 folds 'A'..'Z' onto 'a'..'z'; a single range
	// check then covers both cases.
	folded := ch | 0x20
	return folded >= 'a' && folded <= 'z'
}

// isDigit reports whether ch is an ASCII digit (0-9).
func (that *Tokenizer) isDigit(ch byte) bool {
	// byte subtraction wraps, so anything below '0' becomes large and
	// fails the single comparison.
	return ch-'0' < 10
}

// isWhiteSpace reports whether ch is a whitespace character to skip.
// Carriage return is included so that CRLF ("\r\n") line endings do not
// surface '\r' as an "Unrecognized pattern" in getToken.
func (that *Tokenizer) isWhiteSpace(ch byte) bool {
	switch ch {
	case ' ', '\t', '\n', '\r':
		return true
	default:
		return false
	}
}
// isSeparator reports whether ch is a single-character separator token.
func (that *Tokenizer) isSeparator(ch byte) bool {
	// Scan the fixed separator set instead of enumerating switch cases.
	const separators = "{}[](),;.:|"
	for i := 0; i < len(separators); i++ {
		if separators[i] == ch {
			return true
		}
	}
	return false
}
