package lexer

import (
	"unicode"

	"gitee.com/yanxingshuyuan/ming/token"
)

type Lexer struct {
	source  []rune // source text as runes (rune slice so CJK characters are single elements)
	readPos int    // index of the next rune to read; invariant: readPos = pos + 1 after next()
	pos     int    // index of the current rune (the one held in ch)
	ch      rune   // current rune; 0 marks end of input
}

// NewLexer builds a lexer over input and primes it on the first character,
// so ch/pos are valid before the first NextToken call.
func NewLexer(input string) *Lexer {
	lx := &Lexer{source: []rune(input)}
	lx.next() // load the first rune into lx.ch
	return lx
}

// oneCharToken packages the current character as a token of type t and
// advances the lexer past it.
func (l *Lexer) oneCharToken(t token.TokenType) token.Token {
	literal := []rune{l.ch}
	l.next()
	return token.Token{Type: t, Literal: literal}
}

// twoCharToken consumes the current character and the one after it,
// returning both as the literal of a token of type t (e.g. "==", "+=").
func (l *Lexer) twoCharToken(t token.TokenType) token.Token {
	first := l.ch
	l.next()
	second := l.ch
	l.next()
	return token.Token{Type: t, Literal: []rune{first, second}}
}

// operatorToken lexes an operator that may be one or two characters long:
// if the character after the current one equals tail, the two-character
// token t2 is produced; otherwise the single-character token t1.
func (l *Lexer) operatorToken(t1 token.TokenType, t2 token.TokenType, tail rune) token.Token {
	if l.peek() == tail {
		return l.twoCharToken(t2)
	}
	return l.oneCharToken(t1)
}

// consumeToken emits the current character as a token of type t and advances
// past it. Its body duplicated oneCharToken verbatim; it now delegates so the
// single-character-token logic lives in exactly one place.
func (l *Lexer) consumeToken(t token.TokenType) token.Token {
	return l.oneCharToken(t)
}

// NextToken skips leading whitespace and returns the next token from the
// input. At end of input it returns an EOF token; a character no rule
// recognizes becomes an ILLEGAL token.
func (l *Lexer) NextToken() token.Token {
	l.skipWhiteSpace()

	switch l.ch {
	case 0:
		return l.consumeToken(token.EOF)
	// Full-width punctuation used by the language's own syntax.
	case '【':
		return l.consumeToken(token.TK_左方)
	case '】':
		return l.consumeToken(token.TK_右方)
	case '：':
		return l.consumeToken(token.TK_冒号)
	case '。':
		return l.consumeToken(token.TK_句号)
	// Operators that become a different two-character token when followed by '='.
	case '=':
		return l.operatorToken(token.ASSIGN, token.EQL, '=')
	case '+':
		return l.operatorToken(token.ADD, token.ADD_ASSIGN, '=')
	case '-':
		return l.operatorToken(token.SUB, token.SUB_ASSIGN, '=')
	case '*':
		return l.operatorToken(token.MUL, token.MUL_ASSIGN, '=')
	case '/':
		return l.operatorToken(token.QUO, token.QUO_ASSIGN, '=')
	case '!':
		return l.operatorToken(token.NOT, token.NOT_EQL, '=')
	case '>':
		return l.operatorToken(token.GTR, token.GTR_EQL, '=')
	case '<':
		return l.operatorToken(token.LSS, token.LSS_EQL, '=')
	// Plain ASCII punctuation.
	case ':':
		return l.consumeToken(token.COLON)
	case '(':
		return l.consumeToken(token.LPAREN)
	case ')':
		return l.consumeToken(token.RPAREN)
	case '{':
		return l.consumeToken(token.LBRACE)
	case '}':
		return l.consumeToken(token.RBRACE)
	case ',':
		return l.consumeToken(token.COMMA)
	case ';':
		return l.consumeToken(token.SEMICOLON)
	}

	// Not punctuation: number, identifier/keyword, or an illegal character.
	switch {
	case isDigit(l.ch):
		return token.Token{Type: token.TK_数, Literal: l.readNumber()}
	case isIdentStart(l.ch):
		lit := l.readIdent()
		// FindIdentType distinguishes keywords from plain identifiers.
		return token.Token{Type: token.FindIdentType(string(lit)), Literal: lit}
	default:
		return l.consumeToken(token.ILLEGAL)
	}
}

// next advances the lexer by one character (not one token, as the old comment
// claimed): ch receives the rune at readPos — or 0 once the input is
// exhausted — pos catches up to readPos, and readPos moves one further.
// Receiver renamed from s to l for consistency with every other Lexer method.
func (l *Lexer) next() {
	if l.readPos >= len(l.source) {
		l.ch = 0 // 0 marks end of input
	} else {
		l.ch = l.source[l.readPos]
	}
	l.pos = l.readPos
	l.readPos++
}

// peek returns the next character without advancing the lexer,
// or 0 when the input is exhausted.
func (l *Lexer) peek() rune {
	if l.readPos >= len(l.source) {
		return 0
	}
	return l.source[l.readPos]
}

// skipWhiteSpace advances past spaces, tabs, carriage returns and newlines.
// '\n' was previously missing from this set; since no case in NextToken
// handles it either, every newline in multi-line input was lexed as an
// ILLEGAL token. If this language ever gives newlines syntactic meaning,
// a dedicated token should be emitted here instead.
func (l *Lexer) skipWhiteSpace() {
	for l.ch == ' ' || l.ch == '\t' || l.ch == '\r' || l.ch == '\n' {
		l.next()
	}
}

// isLetter reports whether rn is an ASCII letter or an underscore.
func isLetter(rn rune) bool {
	switch {
	case rn >= 'a' && rn <= 'z':
		return true
	case rn >= 'A' && rn <= 'Z':
		return true
	default:
		return rn == '_'
	}
}

// isIdentStart reports whether rn may begin an identifier: an underscore or
// any Unicode letter. (unicode.IsLetter already covers the ASCII letters, so
// the only extra character to admit is '_'.)
func isIdentStart(rn rune) bool {
	return rn == '_' || unicode.IsLetter(rn)
}

// isIdent reports whether rn may appear inside an identifier: an underscore,
// any Unicode letter, or an ASCII digit. The digit check is deliberately
// ASCII-only, matching isDigit (unicode.IsDigit would admit more scripts).
func isIdent(rn rune) bool {
	if rn == '_' || unicode.IsLetter(rn) {
		return true
	}
	return rn >= '0' && rn <= '9'
}

// isDigit reports whether rn is an ASCII decimal digit. Kept as a range
// check on purpose: unicode.IsDigit would also accept digits from other
// scripts, which readNumber does not intend to consume.
func isDigit(rn rune) bool {
	return rn >= '0' && rn <= '9'
}

// readIdent consumes an identifier starting at the current character and
// returns its runes. The keyword runes 令 and 为 are special-cased: standing
// alone they form a one-rune identifier, and appearing after other identifier
// runes they terminate the identifier without being consumed, so they always
// lex as their own token.
func (l *Lexer) readIdent() []rune {
	start := l.pos
	// A leading 令/为 is a complete one-rune identifier by itself.
	if l.ch == '令' || l.ch == '为' {
		l.next()
		return l.source[start:l.pos]
	}
	// Otherwise consume identifier runes until a non-ident rune or a 令/为
	// boundary is reached.
	for isIdent(l.ch) && l.ch != '令' && l.ch != '为' {
		l.next()
	}
	return l.source[start:l.pos]
}

// readNumber consumes a run of ASCII digits starting at the current
// character and returns them as a rune slice.
func (l *Lexer) readNumber() []rune {
	start := l.pos
	for ; isDigit(l.ch); l.next() {
	}
	return l.source[start:l.pos]
}
