package lexer

import (
	"gum/interpreter/token"
	"strings"
)

// Lexer performs byte-oriented lexical scanning over an input string.
// NOTE(review): input is indexed byte by byte, so multi-byte UTF-8 runes
// are not treated as single characters — confirm the source language is
// ASCII-only.
type Lexer struct {
	end              bool   // set once the EOF token has been emitted
	input            string // full source text being scanned
	ch               uint8  // char — current character; 0 once input is exhausted
	position         int    // index just past ch, i.e. the index of the next character
	nextPosition     int    // position + 1, maintained by readChar
	nextNextPosition int    // nextPosition + 1, maintained by readChar
}

// NewLexer creates a Lexer over input and primes it by loading the first
// character, so the returned lexer is immediately ready for ReadToken.
func NewLexer(input string) *Lexer {
	// Keyed literal instead of the positional form: the remaining fields
	// (end, ch, position, ...) want their zero values anyway, and keyed
	// initialization stays correct if the struct's field order changes.
	l := &Lexer{input: input}
	l.readChar()
	return l
}

// currentChar returns the character the lexer is currently looking at,
// or 0 once the end of input has been reached.
//
// l.position has already been advanced past the current character by
// readChar, so the cached l.ch is the authoritative current character,
// and readChar sets l.ch to 0 at end of input — no bounds check is
// needed here. The previous guard (position >= len(input)) wrongly
// returned 0 while the LAST character of the input was still current,
// which truncated trailing digit/identifier tokens.
func (l *Lexer) currentChar() uint8 {
	return l.ch
}

// nextChar peeks at the character that follows the current one without
// consuming anything, returning 0 when no such character exists.
//
// After readChar, l.position already indexes the character following the
// current one, so it is both the bound to check and the index to read.
// The previous version checked l.nextPosition (one slot further ahead)
// before reading l.input[l.position], returning 0 one character too
// early — e.g. a trailing "+=" lexed as Plus then Equal instead of
// PlusEqual.
func (l *Lexer) nextChar() uint8 {
	if l.position >= len(l.input) {
		return 0
	}
	return l.input[l.position]
}

// nextNextChar peeks two characters past the current one without
// consuming anything, returning 0 when no such character exists.
//
// l.nextPosition indexes the character two ahead of the current one, so
// it is both the bound to check and the index to read. The previous
// version checked l.nextNextPosition (one slot further ahead) before
// reading l.input[l.nextPosition], returning 0 one character too early
// at the end of input.
func (l *Lexer) nextNextChar() uint8 {
	if l.nextPosition >= len(l.input) {
		return 0
	}
	return l.input[l.nextPosition]
}

// skipWhiteSpace consumes spaces, tabs, carriage returns and newlines so
// that the next token starts on a significant character.
func (l *Lexer) skipWhiteSpace() {
	for l.ch == ' ' || l.ch == '\t' || l.ch == '\r' || l.ch == '\n' {
		l.readChar()
	}
}

// isDigit reports whether the current character is an ASCII digit.
func (l *Lexer) isDigit() bool {
	c := l.currentChar()
	return c >= '0' && c <= '9'
}

// isAlpha reports whether the current character is an ASCII letter.
func (l *Lexer) isAlpha() bool {
	c := l.currentChar()
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// isAllowedInIdentifier reports whether the current character may appear
// in an identifier: an underscore or an ASCII letter.
// NOTE(review): digits are excluded, so "x1" lexes as Identifier "x"
// followed by Digits "1" — confirm this matches the language spec.
func (l *Lexer) isAllowedInIdentifier() bool {
	return l.currentChar() == '_' || l.isAlpha()
}

// readChar loads the character at l.position into l.ch and advances all
// three position cursors by one. At end of input it sets l.ch to 0 (the
// EOF sentinel) and leaves the cursors untouched.
func (l *Lexer) readChar() {
	pos := l.position
	if pos >= len(l.input) {
		// Reached end of input: signal EOF via the zero character.
		l.ch = 0
		return
	}
	l.ch = l.input[pos]
	l.position = pos + 1
	l.nextPosition = pos + 2
	l.nextNextPosition = pos + 3
}

// ReadToken scans and returns the next token from the input, advancing
// the lexer past it. At end of input it marks the lexer finished and
// returns an EOF token; a character it cannot classify yields an
// Unknown token.
func (l *Lexer) ReadToken() *token.Token {
	l.skipWhiteSpace()
	switch l.ch {
	case 0:
		l.end = true
		return token.NewToken(token.EOF, "EOF")
	case '=':
		return l.singleCharToken(token.Equal)
	case '+':
		if l.nextChar() == '=' {
			return l.twoCharToken(token.PlusEqual)
		}
		return l.singleCharToken(token.Plus)
	case '-':
		if l.nextChar() == '=' {
			return l.twoCharToken(token.MinusEqual)
		}
		return l.singleCharToken(token.Minus)
	case '*':
		if l.nextChar() == '=' {
			return l.twoCharToken(token.ProductEqual)
		}
		return l.singleCharToken(token.Product)
	case '/':
		if l.nextChar() == '=' {
			return l.twoCharToken(token.DivideEqual)
		}
		return l.singleCharToken(token.Divide)
	case '(':
		return l.singleCharToken(token.Lparen)
	case ')':
		return l.singleCharToken(token.Rparen)
	case '[':
		return l.singleCharToken(token.Lbracket)
	case ']':
		return l.singleCharToken(token.Rbracket)
	case '{':
		return l.singleCharToken(token.Lbrace)
	case '}':
		return l.singleCharToken(token.Rbrace)
	case '"':
		return l.singleCharToken(token.DoubleQuotes)
	case ';':
		return l.singleCharToken(token.SemiColon)
	case ':':
		return l.singleCharToken(token.Colon)
	case ',':
		return l.singleCharToken(token.Comma)
	default:
		if l.isDigit() {
			return token.NewToken(token.Digits, l.readDigits())
		}
		if l.isAllowedInIdentifier() {
			literal := l.readIdentifier()
			return token.NewToken(l.lookup(literal), literal)
		}
		// Bug fix: advance past the unrecognized character. Previously the
		// lexer stayed put and returned Unknown forever, so any caller
		// looping until EOF spun indefinitely on the first bad character.
		l.readChar()
		return token.NewToken(token.Unknown, "Unknown")
	}
}

// singleCharToken builds a one-character token of type typ from the
// current character and advances past it.
func (l *Lexer) singleCharToken(typ int) *token.Token {
	ch := l.ch
	l.readChar()
	return token.NewToken(typ, string(ch))
}

// twoCharToken builds a two-character token of type typ from the current
// character and the one that follows (e.g. "+="), advancing past both.
func (l *Lexer) twoCharToken(typ int) *token.Token {
	var literal strings.Builder
	literal.WriteByte(l.ch)
	l.readChar() // move to the second character ('=')
	literal.WriteByte(l.ch)
	l.readChar() // move past the operator
	return token.NewToken(typ, literal.String())
}

// readDigits consumes a maximal run of ASCII digits starting at the
// current character and returns it as the token literal.
func (l *Lexer) readDigits() string {
	var b strings.Builder
	for l.isDigit() {
		b.WriteByte(l.ch)
		l.readChar()
	}
	return b.String()
}

// readIdentifier consumes a maximal run of identifier characters
// (letters and underscores) starting at the current character and
// returns it as the token literal.
func (l *Lexer) readIdentifier() string {
	var b strings.Builder
	for l.isAllowedInIdentifier() {
		b.WriteByte(l.ch)
		l.readChar()
	}
	return b.String()
}

// EOF reports whether the lexer has already emitted its EOF token.
func (l *Lexer) EOF() bool {
	return l.end
}

// lookup maps an identifier literal to its token type: the keyword type
// when the literal is a reserved word, otherwise token.Identifier.
func (l *Lexer) lookup(literal string) int {
	if typ, ok := token.Keywords[literal]; ok {
		return typ
	}
	return token.Identifier
}
