package mynl

import (
	"fmt"
)

/////////////////////////////////////////////////////////////////////////
// Lexical analysis.
// This section defines the token data structures, a character stream, and
// an on-demand Tokenizer. The parser reads tokens from the Tokenizer one
// by one, and can save/restore the current read position for backtracking.

// TokenKind enumerates the categories of tokens produced by the tokenizer
// (see the constants below).
type TokenKind int

// Token kind constants. The declaration order is significant: String()
// indexes a name table by these values.
const (
	TokenKind_Keyword TokenKind = iota // reserved words; currently only "function" (see parseIdentifer)
	TokenKind_Identifier               // names made of letters, digits, '_' and '.'
	TokenKind_StringLiteral            // double-quoted string contents (quotes stripped, no escapes)
	TokenKind_Seperator                // one of ( ) { } ; ,
	TokenKind_Operator                 // / + - * and their compound forms (e.g. "+=", "++")
	TokenKind_EOF                      // end of input
)

// tokenKindNames holds the printable name for each TokenKind, indexed by
// the constant's value. Keep it in sync with the TokenKind const block.
var tokenKindNames = []string{"Keyword", "Identifier", "StringLiteral", "Seperator", "Operator", "EOF"}

// String returns the human-readable name of the token kind, e.g. "Keyword".
// Out-of-range values yield a diagnostic string instead of panicking.
func (selfx *TokenKind) String() string {
	k := int(*selfx)
	if k < 0 || k >= len(tokenKindNames) {
		return fmt.Sprintf("TokenKind(%d)", k)
	}
	return tokenKindNames[k]
}

// Token is a single lexical unit: its kind plus its raw text. For string
// literals the text excludes the surrounding quotes.
type Token struct {
	Kind TokenKind
	Text string
}

// NewToken builds a Token of the given kind carrying the given text.
func NewToken(Kind TokenKind, Text string) *Token {
	t := Token{Kind: Kind, Text: Text}
	return &t
}

/**
 * A character stream over a string. Operations:
 * peek(): read the next character without advancing the position;
 * next(): read the next character and advance the position;
 * eof():  report whether the end of input has been reached.
 * recordPos()/rollbackPos() save and restore one position, which gives
 * the tokenizer a single level of backtracking.
 *
 * NOTE(review): characters are handled as single bytes, so multi-byte
 * UTF-8 input would be split apart — presumably the source text is
 * ASCII; confirm with callers.
 */
type CharStream struct {
	data    string
	pos     int //current read position (byte index into data)
	line    int //current line number, starts at 1
	col     int //current column number, reset to 0 after each newline
	prepos  int //read position saved by recordPos
	preline int //line number saved by recordPos
	precol  int //column number saved by recordPos
}

// NewCharStream wraps data in a CharStream positioned at the first
// character (line 1, column 0).
func NewCharStream(data string) *CharStream {
	return &CharStream{
		data: data,
		pos:  0,
		line: 1,
		col:  0,
		// Seed the saved position with the same start state, so that a
		// rollbackPos() issued before any recordPos() is a harmless
		// rewind to the beginning instead of jumping to line 0.
		prepos:  0,
		preline: 1,
		precol:  0,
	}
}

// peek returns the next character without consuming it, or "" at EOF.
func (selfx *CharStream) peek() string {
	if !selfx.eof() {
		return selfx.data[selfx.pos : selfx.pos+1]
	}
	return ""
}

// next consumes and returns the next character ("" at EOF), keeping the
// line and column counters in step with the read position.
func (selfx *CharStream) next() string {
	if selfx.eof() {
		return ""
	}

	ch := selfx.data[selfx.pos : selfx.pos+1]
	selfx.pos++

	// A newline starts a fresh line; any other character advances the column.
	if ch == "\n" {
		selfx.line++
		selfx.col = 0
	} else {
		selfx.col++
	}
	return ch
}

// recordPos saves the current position (and line/column counters) so a
// later rollbackPos can restore it. Only one saved position is kept.
func (selfx *CharStream) recordPos() {
	selfx.prepos = selfx.pos
	selfx.preline = selfx.line
	selfx.precol = selfx.col
}

// rollbackPos restores the position saved by the most recent recordPos.
// Calling it without a prior recordPos rewinds to the initial saved state.
func (selfx *CharStream) rollbackPos() {
	selfx.pos = selfx.prepos
	selfx.line = selfx.preline
	selfx.col = selfx.precol
}

// eof reports whether the entire input has been consumed.
func (selfx *CharStream) eof() bool {
	return len(selfx.data) <= selfx.pos
}

/**
 * The tokenizer.
 * The parser obtains tokens from here. The interface behaves like a
 * stream: lexing is performed on demand. Two operations are supported:
 * Next(): return the current token and move on to the next one.
 * Peek(): return the current token without moving the position.
 */
type Tokenizer struct {
	stream       CharStream // underlying character stream
	nextToken    Token      // one-token lookahead; EOF kind doubles as the "not yet parsed" marker
	preNextToken Token      // lookahead saved by recordPos, restored by rollbackPos
}

// NewTokenizer builds a Tokenizer over stream. Both token slots start as
// EOF tokens: nextToken's EOF kind doubles as the "not yet parsed" marker
// (see Next/Peek), and seeding preNextToken the same way keeps a stray
// rollbackPos() issued before any recordPos() from restoring a bogus
// zero-valued token (whose Kind would otherwise be TokenKind_Keyword).
func NewTokenizer(stream CharStream) *Tokenizer {
	eof := Token{Kind: TokenKind_EOF, Text: ""}
	return &Tokenizer{
		stream:       stream,
		nextToken:    eof,
		preNextToken: eof,
	}
}

// Next returns the current token and advances to the following one.
func (selfx *Tokenizer) Next() *Token {
	// An EOF-kind lookahead while input remains means nothing has been
	// parsed yet; prime the lookahead first.
	if selfx.nextToken.Kind == TokenKind_EOF && !selfx.stream.eof() {
		selfx.nextToken = *selfx.getAToken()
	}

	// Hand back a copy of the lookahead, then refill it.
	current := selfx.nextToken
	selfx.nextToken = *selfx.getAToken()
	return &current
}

// Peek returns the current token without advancing past it.
func (selfx *Tokenizer) Peek() *Token {
	// Prime the lookahead on first use, exactly as Next() does.
	if selfx.nextToken.Kind == TokenKind_EOF && !selfx.stream.eof() {
		selfx.nextToken = *selfx.getAToken()
	}
	return &selfx.nextToken
}

// recordPos saves the tokenizer state (lookahead token plus the
// underlying stream position) for a later rollbackPos.
func (selfx *Tokenizer) recordPos() {
	selfx.preNextToken = selfx.nextToken
	selfx.stream.recordPos()
}

// rollbackPos restores the tokenizer to the state saved by the most
// recent recordPos, un-reading any tokens consumed since then.
func (selfx *Tokenizer) rollbackPos() {
	selfx.nextToken = selfx.preNextToken
	selfx.stream.rollbackPos()
}

// getAToken scans the next token from the character stream, skipping
// whitespace, comments and unrecognized characters along the way.
// A loop replaces the original tail recursion so that long runs of
// comments or junk cannot grow the call stack.
func (selfx *Tokenizer) getAToken() *Token {
	for {
		selfx.skipWhiteSpaces()
		if selfx.stream.eof() {
			return NewToken(TokenKind_EOF, "")
		}

		var ch string = selfx.stream.peek()
		switch {
		case selfx.isLetter(ch) || selfx.isDigit(ch):
			// Identifier (or keyword; parseIdentifer sorts that out).
			return selfx.parseIdentifer()
		case ch == "\"":
			// String literal.
			return selfx.parseStringLiteral()
		case ch == "(" || ch == ")" || ch == "{" ||
			ch == "}" || ch == ";" || ch == ",":
			// Separators: parentheses, braces, comma, semicolon.
			selfx.stream.next()
			return NewToken(TokenKind_Seperator, ch)
		case ch == "/":
			// Either an operator (/ or /=) or the start of a comment
			// (// or /*). Comments are skipped and scanning restarts.
			selfx.stream.next()
			switch selfx.stream.peek() {
			case "*":
				selfx.skipMultipleLineComments()
				continue
			case "/":
				selfx.skipSingleLineComment()
				continue
			}
			return selfx.operatorToken("/", "=")
		case ch == "+":
			// Operators +, ++, +=.
			selfx.stream.next()
			return selfx.operatorToken("+", "+", "=")
		case ch == "-":
			// Operators -, --, -=.
			selfx.stream.next()
			return selfx.operatorToken("-", "-", "=")
		case ch == "*":
			// Operators *, *=.
			selfx.stream.next()
			return selfx.operatorToken("*", "=")
		default:
			// Bare newlines are dropped silently; anything else is
			// reported, then discarded, and scanning continues.
			if ch != "\n" && ch != "\r" {
				fmt.Println("Unrecognized pattern meeting :", ch+", at", selfx.stream.line, " col: ", selfx.stream.col)
			}
			selfx.stream.next()
		}
	}
}

// operatorToken finishes scanning an operator whose first character has
// already been consumed: if the next character equals one of followers it
// is consumed and appended (e.g. "+" then "=" yields "+="); otherwise the
// token is just first. Followers are tried in order.
func (selfx *Tokenizer) operatorToken(first string, followers ...string) *Token {
	next := selfx.stream.peek()
	for _, f := range followers {
		if next == f {
			selfx.stream.next()
			return NewToken(TokenKind_Operator, first+f)
		}
	}
	return NewToken(TokenKind_Operator, first)
}

/**
 * Skip a single-line comment.
 * The first '/' was consumed by the caller; this consumes the second one
 * and then everything up to (but not including) the newline or EOF.
 */
func (selfx *Tokenizer) skipSingleLineComment() {
	// Consume the second '/'.
	selfx.stream.next()

	// Discard the rest of the line; peek() returns "" at EOF, but we
	// check eof() explicitly to mirror the loop's termination condition.
	for !selfx.stream.eof() && selfx.stream.peek() != "\n" {
		selfx.stream.next()
	}
}

/**
 * Skip a multi-line comment.
 * The leading '/' was consumed by the caller; this consumes the '*' and
 * everything up to and including the closing "*" "/" pair. If the input
 * ends before the terminator, an error is printed.
 */
func (selfx *Tokenizer) skipMultipleLineComments() {
	// Consume the '*' (the '/' is already gone).
	selfx.stream.next()

	if !selfx.stream.eof() {
		// Slide a two-character window over the input looking for "*/".
		prev := selfx.stream.next()
		for !selfx.stream.eof() {
			cur := selfx.stream.next()
			if prev == "*" && cur == "/" {
				return
			}
			prev = cur
		}
	}

	// Reached EOF without finding the terminator.
	fmt.Println("Failed to find matching */ for multiple line comments at line: ", selfx.stream.line, " col: ", selfx.stream.col)
}

/**
 * Skip whitespace characters (space, tab, CR, LF).
 * Terminates at EOF because peek() then returns "", which is not
 * whitespace.
 */
func (selfx *Tokenizer) skipWhiteSpaces() {
	for selfx.isWhiteSpace(selfx.stream.peek()) {
		selfx.stream.next()
	}
}

/**
 * Parse a string literal.
 * Only double quotes are supported, and escapes are not handled.
 * The returned token's Text excludes the quotes. If the closing quote is
 * missing, an error is printed and the token carries what was read.
 */
func (selfx *Tokenizer) parseStringLiteral() *Token {
	// The opening quote was already seen by the caller; consume it here.
	selfx.stream.next()

	// Accumulate into a byte slice instead of repeated string
	// concatenation, which would be quadratic in the literal's length.
	var text []byte
	for !selfx.stream.eof() && selfx.stream.peek() != "\"" {
		text = append(text, selfx.stream.next()...)
	}

	if selfx.stream.peek() == "\"" {
		// Consume the closing quote.
		selfx.stream.next()
	} else {
		// No closing quote before EOF.
		fmt.Println("Expecting an \" at line: ", selfx.stream.line, " col: ", selfx.stream.col)
	}

	return NewToken(TokenKind_StringLiteral, string(text))
}

/**
 * Parse an identifier, and pick keywords out of identifiers.
 * The caller guarantees the current character is a letter or digit; note
 * this means identifiers may start with a digit, matching getAToken's
 * dispatch. The only keyword recognized is "function".
 */
func (selfx *Tokenizer) parseIdentifer() *Token {
	// Accumulate bytes instead of using string += in a loop, which would
	// be quadratic in the identifier's length. The first character needs
	// no validation — the caller already checked it.
	text := []byte(selfx.stream.next())

	// Read the remaining letters, digits, underscores and dots.
	for !selfx.stream.eof() && selfx.isLetterDigitOrUnderScore(selfx.stream.peek()) {
		text = append(text, selfx.stream.next()...)
	}

	token := NewToken(TokenKind_Identifier, string(text))

	// Reclassify keywords.
	if token.Text == "function" {
		token.Kind = TokenKind_Keyword
	}

	return token
}

// isLetterDigitOrUnderScore reports whether ch may continue an
// identifier: ASCII letter, digit, underscore, or dot ('.' is accepted so
// dotted names lex as a single identifier).
func (selfx *Tokenizer) isLetterDigitOrUnderScore(ch string) bool {
	switch {
	case "A" <= ch && ch <= "Z":
		return true
	case "a" <= ch && ch <= "z":
		return true
	case "0" <= ch && ch <= "9":
		return true
	case ch == "_", ch == ".":
		return true
	}
	return false
}

// isLetter reports whether ch is an ASCII letter.
func (selfx *Tokenizer) isLetter(ch string) bool {
	return ("A" <= ch && ch <= "Z") || ("a" <= ch && ch <= "z")
}

// isDigit reports whether ch is an ASCII decimal digit.
func (selfx *Tokenizer) isDigit(ch string) bool {
	return "0" <= ch && ch <= "9"
}

// isWhiteSpace reports whether ch is a space, tab, newline or carriage
// return.
func (selfx *Tokenizer) isWhiteSpace(ch string) bool {
	switch ch {
	case " ", "\t", "\n", "\r":
		return true
	}
	return false
}
