package lex

import (
	"fmt"
	"src/token"
	"unicode"
	"unicode/utf8"
)

// sover is the sentinel rune stored in Lexer.ch once the end of the
// source has been reached ("scan over").
const sover = -1

// Lexer performs lexical analysis over an in-memory source string,
// producing (token type, position, literal) triples via Scan.
type Lexer struct {
	cur_type token.Token // type of the token currently being scanned
	src      string      // source text being scanned

	ch rune // current rune (sover once input is exhausted)

	pos   int // byte offset just past the current rune
	start int // byte offset where the pending token starts
	width int // byte width of the current rune

	linenNum int // current line number (starts at 1, see NewLexer)
	colNum   int // running column counter, advanced in bytes by next()

	errors []string // accumulated error messages
}

// NewLexer builds a Lexer over src, starting at line 1, and primes it
// by reading the first rune so l.ch is valid before the first Scan.
func NewLexer(src string) *Lexer {
	l := new(Lexer)
	l.src = src
	l.linenNum = 1
	l.next()
	return l
}

// next advances the lexer by one rune, updating l.ch, l.pos, l.width
// and the line/column counters, and returns the rune read. At end of
// input it sets l.ch to the sentinel sover and l.width to 0 so that a
// subsequent backup() is a no-op.
func (l *Lexer) next() rune {
	if l.pos >= len(l.src) {
		l.ch = sover
		l.width = 0
		return l.ch
	}

	r, w := utf8.DecodeRuneInString(l.src[l.pos:])
	l.width = w
	l.pos += l.width
	// NOTE(review): colNum advances by byte width and is never reset at
	// '\n' — looks like columns are cumulative byte offsets; confirm the
	// intended column semantics against token.Position's users.
	l.colNum += l.width
	l.ch = r
	if l.ch == '\n' {
		l.linenNum++
		//fmt.Println("add pos=", l.pos-l.width, " ln=", l.linenNum)
	}

	return l.ch
}

// err records a plain error message for later reporting.
func (l *Lexer) err(e string) {
	errs := append(l.errors, e)
	l.errors = errs
}

// errf records a formatted error message for later reporting.
func (l *Lexer) errf(f string, v ...interface{}) {
	msg := fmt.Sprintf(f, v...)
	l.errors = append(l.errors, msg)
}

// backup un-reads the most recently read rune by rewinding pos and the
// column counter by l.width. Only the last rune's width is remembered,
// so at most one next() call can be undone. At EOF next() sets width to
// 0, making backup a no-op; the width != 0 guard also keeps the line
// counter from being decremented in that case.
func (l *Lexer) backup() {
	l.pos -= l.width
	l.colNum -= l.width
	if l.ch == '\n' && l.width != 0 {
		l.linenNum--
		//fmt.Println("del pos=", l.pos-l.width, " ln=", l.linenNum)
	}
}

// peek returns the next rune without consuming it: it reads one rune
// forward and then backs up before returning.
func (l *Lexer) peek() rune {
	defer l.backup()
	return l.next()
}

// ignore discards any pending input by moving l.start up to the
// beginning of the current rune (l.pos - l.width), so the character
// already held in l.ch becomes the first character of the next token.
// The guard keeps start at 0 when nothing has been read yet.
func (l *Lexer) ignore() {
	l.start = l.pos
	if l.start > 0 {
		l.start -= l.width
	}
}

// finish reports end of input: an EOF token with a zero position and an
// empty literal.
func (l *Lexer) finish() (token.Token, token.Position, string) {
	var pos token.Position
	return token.EOF, pos, ""
}

// token emits the pending lexeme: the current token type, its starting
// position, and the literal text l.src[l.start:l.pos]. The column is
// reconstructed by subtracting the lexeme's byte length from the
// running column counter. Afterwards start is advanced to pos so the
// lexeme is consumed.
func (l *Lexer) token() (token.Token, token.Position, string) {
	val := l.src[l.start:l.pos]
	pos := token.Position{l.linenNum, l.colNum - (l.pos - l.start)}

	l.start = l.pos
	return l.cur_type, pos, val
}

// skipWhitespace advances past any run of Unicode whitespace, leaving
// l.ch on the first non-space rune (or the EOF sentinel, which is not
// whitespace and so terminates the loop).
func (l *Lexer) skipWhitespace() {
	for {
		if !unicode.IsSpace(l.ch) {
			return
		}
		l.next()
	}
}

// isLetter reports whether r may start or continue an identifier:
// any Unicode letter, or the underscore.
func isLetter(r rune) bool {
	if r == '_' {
		return true
	}
	return unicode.IsLetter(r)
}

// scanIdentifier consumes an identifier (letters, '_' and digits)
// starting at the current rune and returns its text. The loop stops
// after reading the first non-identifier rune, so backup() un-reads it,
// leaving l.pos exactly at the identifier's end.
func (l *Lexer) scanIdentifier() string {
	for isLetter(l.ch) || unicode.IsDigit(l.ch) {
		l.next()
	}
	l.backup()

	// l.src is a string, so slicing already yields a string; the former
	// string(...) conversion was redundant.
	return l.src[l.start:l.pos]
}

// cValid reports whether s is a real character, i.e. strictly greater
// than the EOF sentinel sover.
func (l *Lexer) cValid(s rune) bool {
	return sover < s
}

// lower returns the lower-case form of ch; meaningful only when ch is
// an ASCII letter (it simply sets the 0x20 case bit).
func lower(ch rune) rune { return ch | ('a' - 'A') }

// isDecimal reports whether ch is an ASCII decimal digit.
func isDecimal(ch rune) bool {
	return ch >= '0' && ch <= '9'
}

// isHex reports whether ch is an ASCII hexadecimal digit (0-9, a-f, A-F).
func isHex(ch rune) bool {
	return isDecimal(ch) || ('a' <= lower(ch) && lower(ch) <= 'f')
}

// scanNumber consumes a numeric literal starting at the current rune
// and sets l.cur_type to token.NUMBER or token.ILLEGAL. Decimal
// literals and "0x"-prefixed hexadecimal literals are supported.
func (l *Lexer) scanNumber() {
	nb := false // true once at least one extra rune has been consumed
	l.cur_type = token.ILLEGAL
	if l.ch == '0' {
		l.next()
		nb = true
		if lower(l.ch) == 'x' {
			// Hexadecimal literal: step over the 'x' marker before
			// scanning digits. (Previously the 'x' was not consumed, and
			// since isHex('x') is false the digit loop never ran, so
			// "0x1F" was mis-lexed as just "0".)
			l.next()
			for isHex(l.ch) {
				l.next()
			}
		}
	}
	for isDecimal(l.ch) {
		l.next()
		nb = true
	}

	if nb {
		// Un-read the terminating rune so it belongs to the next token.
		l.backup()
	}
	last := l.peek()
	// NOTE(review): !l.cValid(last) is true exactly at end of input, so
	// this marks a number that ends the source as ILLEGAL while accepting
	// any other terminator besides whitespace and 'L'. The condition
	// looks inverted — confirm the intended terminator rule with callers
	// before changing it.
	if !unicode.IsSpace(last) && last != 'L' && !l.cValid(last) {
		l.cur_type = token.ILLEGAL
	} else {
		l.cur_type = token.NUMBER
	}
}

// Scan returns the next token, its position, and its literal text.
// Whitespace is skipped first; end of input yields finish()'s EOF
// triple. Identifiers are classified via token.Lookup, numbers via
// scanNumber, and any other rune becomes a single-character ILLEGAL
// token.
func (l *Lexer) Scan() (token.Token, token.Position, string) {
	l.skipWhitespace()
	if l.ch < 0 {
		return l.finish()
	}
	l.ignore()

	ch := l.ch
	switch {
	case isLetter(ch):
		ident := l.scanIdentifier()
		l.cur_type = token.Lookup(ident)
	case isDecimal(ch) || ch == '.' && isDecimal(l.peek()):
		l.scanNumber()
	default:
		l.next()
		if ch == -1 {
			// Unreachable in practice: EOF is handled before the switch.
			l.finish()
		} else {
			l.cur_type = token.ILLEGAL
		}
	}

	return l.token()
}
