package lex

import (
	"strings"
	"unicode"

	"gitcode.com/deyiyangyang/bampoo/tok"
)

// Lexer represents a lexical analyzer for Scheme
type Lexer struct {
	input        string
	position     int  // current position in input (points to current char)
	readPosition int  // current reading position in input (after current char)
	ch           byte // current char under examination
	line         int
	column       int
}

// New creates a new Lexer
func New(input string) *Lexer {
	l := &Lexer{
		input:  input,
		line:   1,
		column: 0,
	}
	l.readChar()
	return l
}

// NextToken gets the next token
func (l *Lexer) NextToken() tok.Token {
	var token tok.Token

	l.skipWhitespace()

	// Handle comments
	if l.ch == ';' {
		l.skipComment()
		l.skipWhitespace()   // Skip any whitespace after comment
		return l.NextToken() // Get the next actual token
	}

	// Handle boolean literals
	if l.ch == '#' {
		l.readChar()
		token.Line = l.line
		token.Column = l.column
		if l.ch == 't' {
			token = l.newToken(tok.BOOLEAN, "#t")
			l.readChar()
		} else if l.ch == 'f' {
			token = l.newToken(tok.BOOLEAN, "#f")
			l.readChar()
		} else {
			token = l.newToken(tok.ILLEGAL, "#"+string(l.ch))
			l.readChar()
		}
		return token
	}

	// Handle quote syntax sugar
	if l.ch == '\'' {
		token = l.newToken(tok.QUOTE, string(l.ch))
		l.readChar()
		return token
	}

	switch l.ch {
	case '(':
		token = l.newToken(tok.LPAREN, string(l.ch))
		l.readChar()
	case ')':
		token = l.newToken(tok.RPAREN, string(l.ch))
		l.readChar()
	case '"':
		token.Type = tok.STRING
		token.Literal = l.readString()
		token.Line = l.line
		token.Column = l.column
		l.readChar() // consume closing quote
	case '+':
		token = l.newToken(tok.PLUS, string(l.ch))
		l.readChar()
	case '-':
		// Check if this is a negative number
		if isDigit(l.peekChar()) || (l.peekChar() == '.' && isDigit(l.input[l.readPosition+1])) {
			l.readChar() // consume the negative sign first
			// Read the number
			number, isReal := l.readNumber()
			token.Literal = "-" + number
			token.Line = l.line
			token.Column = l.column
			// Set the correct token type
			if isReal {
				token.Type = tok.REAL
			} else {
				token.Type = tok.INT
			}
		} else {
			token = l.newToken(tok.MINUS, string(l.ch))
			l.readChar()
		}
	case '*':
		token = l.newToken(tok.MULTIPLY, string(l.ch))
		l.readChar()
	case '/':
		token = l.newToken(tok.DIVIDE, string(l.ch))
		l.readChar()
	case '>':
		l.readChar()
		if l.ch == '=' {
			token = l.newToken(tok.GREATER_OR_EQUAL, ">=")
			l.readChar()
		} else {
			token = l.newToken(tok.GREATER, ">")
		}
	case '<':
		l.readChar()
		if l.ch == '=' {
			token = l.newToken(tok.LESS_OR_EQUAL, "<=")
			l.readChar()
		} else {
			token = l.newToken(tok.LESS, "<")
		}
	case '=':
		token = l.newToken(tok.EQUAL, "=")
		l.readChar()
	case 0:
		token.Literal = ""
		token.Type = tok.EOF
		token.Line = l.line
		token.Column = l.column
	default:
		if isDigit(l.ch) || (l.ch == '.' && isDigit(l.peekChar())) {
			// If starts with a decimal point, consume it
			if l.ch == '.' {
				l.readChar()
				number, _ := l.readNumber()
				token.Literal = "." + number
				token.Type = tok.REAL
			} else {
				// Otherwise, read the number normally
				number, isReal := l.readNumber()
				token.Literal = number
				if isReal {
					token.Type = tok.REAL
				} else {
					token.Type = tok.INT
				}
			}
			token.Line = l.line
			token.Column = l.column
		} else if isLetter(l.ch) || isSpecialInitial(l.ch) {
			literal := l.readSymbol()
			tokenType := tok.SYMBOL

			// Check for special forms
			switch literal {
			case "define":
				tokenType = tok.DEFINE
			case "display":
				tokenType = tok.DISPLAY
			case "begin":
				tokenType = tok.BEGIN
			case "lambda":
				tokenType = tok.LAMBDA
			case "set!":
				tokenType = tok.SET
			case "let":
				tokenType = tok.LET
			case "if":
				tokenType = tok.IF
			case "cond":
				tokenType = tok.COND
			case "and":
				tokenType = tok.AND
			case "or":
				tokenType = tok.OR
			case "not":
				tokenType = tok.NOT
			}

			token = tok.Token{
				Type:    tokenType,
				Literal: literal,
				Line:    l.line,
				Column:  l.column,
			}
		} else {
			token = l.newToken(tok.ILLEGAL, string(l.ch))
			l.readChar()
		}
	}

	return token
}

// readChar advances the lexer by one byte, updating position bookkeeping.
// Past the end of input, ch becomes 0 (the EOF sentinel). Reading a
// newline bumps the line counter and resets the column to 0.
func (l *Lexer) readChar() {
	if l.readPosition < len(l.input) {
		l.ch = l.input[l.readPosition]
	} else {
		l.ch = 0
	}
	l.position = l.readPosition
	l.readPosition++
	l.column++

	if l.ch == '\n' {
		l.line++
		l.column = 0
	}
}

// peekChar returns the next byte without advancing, or 0 at end of input.
func (l *Lexer) peekChar() byte {
	if l.readPosition >= len(l.input) {
		return 0
	}
	return l.input[l.readPosition]
}

// skipWhitespace consumes spaces, tabs, carriage returns and newlines
// until a non-whitespace character (or EOF) is the current char.
func (l *Lexer) skipWhitespace() {
	for {
		switch l.ch {
		case ' ', '\t', '\n', '\r':
			l.readChar()
		default:
			return
		}
	}
}

// skipComment consumes characters up to (but not including) the next
// newline, or to end of input — semicolon comments run to end of line.
func (l *Lexer) skipComment() {
	for {
		if l.ch == '\n' || l.ch == 0 {
			return
		}
		l.readChar()
	}
}

// readString consumes the body of a string literal, translating the
// escape sequences \n, \t, \r, \\ and \". Unrecognized escapes are kept
// verbatim (backslash plus the following character). On entry the
// current char is the opening quote; the closing quote is left for the
// caller to consume. An unterminated string simply ends at EOF.
func (l *Lexer) readString() string {
	var result strings.Builder

	for {
		l.readChar()
		if l.ch == '"' || l.ch == 0 {
			break
		}
		if l.ch == '\\' {
			// Handle escape sequences
			next := l.peekChar()
			l.readChar() // consume the escaped character
			switch next {
			case 'n':
				result.WriteByte('\n')
			case 't':
				result.WriteByte('\t')
			case 'r':
				result.WriteByte('\r')
			case '\\':
				result.WriteByte('\\')
			case '"':
				result.WriteByte('"')
			case 0:
				// Trailing backslash at end of input: keep the backslash
				// alone instead of appending a spurious NUL byte.
				result.WriteByte('\\')
			default:
				// If we don't recognize the escape sequence, include both characters
				result.WriteByte('\\')
				result.WriteByte(next)
			}
		} else {
			result.WriteByte(l.ch)
		}
	}

	// Don't consume the closing quote here
	return result.String()
}

// readSymbol consumes a symbol starting at the current char and returns
// its text. Subsequent characters may be letters, digits, or any of the
// special symbol characters (digits are covered by isSpecialSubsequent).
func (l *Lexer) readSymbol() string {
	start := l.position
	for isLetter(l.ch) || isSpecialSubsequent(l.ch) {
		l.readChar()
	}
	return l.input[start:l.position]
}

// readNumber consumes an unsigned numeric literal starting at the
// current char. It returns the literal text and whether it contained a
// decimal point (true means REAL, false means INT). A '.' is treated as
// part of the number only when a digit follows it.
func (l *Lexer) readNumber() (string, bool) {
	start := l.position

	// Integer part.
	for isDigit(l.ch) {
		l.readChar()
	}

	// Fractional part, only if '.' is immediately followed by a digit.
	isReal := l.ch == '.' && isDigit(l.peekChar())
	if isReal {
		l.readChar() // consume the decimal point
		for isDigit(l.ch) {
			l.readChar()
		}
	}

	return l.input[start:l.position], isReal
}

// newToken builds a token of the given type and literal, stamped with
// the lexer's current line and column.
func (l *Lexer) newToken(tokenType tok.TokenType, literal string) tok.Token {
	t := tok.Token{Type: tokenType, Literal: literal}
	t.Line = l.line
	t.Column = l.column
	return t
}

// isLetter reports whether ch is a letter. The byte is widened to a rune
// before the Unicode check, so bytes >= 0x80 are classified by their
// Latin-1 code point (matching the original behavior), not as UTF-8.
func isLetter(ch byte) bool {
	r := rune(ch)
	return unicode.IsLetter(r)
}

// isDigit reports whether ch is an ASCII decimal digit '0'..'9'.
// (Within a single byte's range, these are the only Unicode Nd digits,
// so this is equivalent to unicode.IsDigit(rune(ch)).)
func isDigit(ch byte) bool {
	return '0' <= ch && ch <= '9'
}

// isSpecialInitial reports whether ch is one of the special characters
// allowed to start a Scheme symbol.
func isSpecialInitial(ch byte) bool {
	const specials = "!$%&*+-./:<=>?@^_~"
	return strings.IndexByte(specials, ch) >= 0
}

func isSpecialSubsequent(ch byte) bool {
	return isSpecialInitial(ch) || ch == '+' || ch == '-' || isDigit(ch)
}
