package lex

import (
	"errors"
	"showen5/arithm"
	"showen5/tok"
	"strings"
)

// Lexer is a single-pass scanner over a Scheme-like source string.
// It tracks both byte offsets and a human-readable line/column position.
type Lexer struct {
	input        string
	position     int  // current position in input (points to current char)
	readPosition int  // current reading position in input (after current char)
	ch           byte // current char under examination; 0 signals end of input
	line         int  // current line number (starts at 0; never set to 1 — confirm 0-based is intended)
	column       int  // current column; reset to 0 when a newline is consumed
}

// NewLexer returns a Lexer positioned at the first character of input.
func NewLexer(input string) *Lexer {
	lexer := &Lexer{input: input}
	lexer.advance() // prime ch with the first character (or 0 for empty input)
	return lexer
}

// advance consumes one character: ch becomes the byte at readPosition
// (0 once the input is exhausted) and position/readPosition/column each
// move forward. Consuming a newline bumps the line counter and resets
// the column so the next character lands on column 1.
func (l *Lexer) advance() {
	if l.readPosition < len(l.input) {
		l.ch = l.input[l.readPosition]
	} else {
		l.ch = 0
	}
	l.position = l.readPosition
	l.readPosition++
	l.column++

	if l.ch == '\n' {
		l.line++
		l.column = 0
	}
}

// ReadTokens lexes the entire input and returns every token produced,
// including the terminating EOF token (always the last element).
func (l *Lexer) ReadTokens() []tok.Token {
	var tokens []tok.Token
	for {
		t := l.NextToken()
		tokens = append(tokens, t)
		if t.Type == tok.EOF {
			return tokens
		}
	}
}

// NextToken scans and returns the next token, leaving the lexer positioned
// just past it. Single-character tokens are built inline; strings, comments
// and '#'-prefixed forms are dispatched to helpers; anything else is read as
// an atom (number or identifier) up to the next delimiter.
func (l *Lexer) NextToken() tok.Token {
	var token tok.Token

	l.skipSpace()

	// Check for special characters and delimiters
	switch l.ch {
	case '(':
		token = tok.Token{
			Type:    tok.LPAREN,
			Lexeme:  "(",
			Literal: "(",
			Line:    l.line,
			Column:  l.column,
		}
	case ')':
		token = tok.Token{
			Type:    tok.RPAREN,
			Lexeme:  ")",
			Literal: ")",
			Line:    l.line,
			Column:  l.column,
		}
	case '\'':
		token = tok.Token{
			Type:    tok.QUOTE,
			Lexeme:  "'",
			Literal: "'",
			Line:    l.line,
			Column:  l.column,
		}
	case '`':
		token = tok.Token{
			Type:    tok.QUASIQUOTE,
			Lexeme:  "`",
			Literal: "`",
			Line:    l.line,
			Column:  l.column,
		}
	case ',':
		// Check if it's unquote-splicing (,@) or just unquote (,)
		if l.peekChar() == '@' {
			token = tok.Token{
				Type:    tok.UNQUOTE_SPLICING,
				Lexeme:  ",@",
				Literal: ",@",
				Line:    l.line,
				Column:  l.column,
			}
			l.advance() // consume ','; the trailing advance at the bottom then consumes '@'
		} else {
			token = tok.Token{
				Type:    tok.UNQUOTE,
				Lexeme:  ",",
				Literal: ",",
				Line:    l.line,
				Column:  l.column,
			}
		}
	case '"':
		token = l.readString()
	case ';':
		token = l.readComment()
	case '#':
		// '#' introduces booleans (#t/#f), characters (#\x) and vectors (#( ).
		p := l.peekChar()
		switch p {
		case 't', 'T':
			token = l.readBoolean(true)
		case 'f', 'F':
			token = l.readBoolean(false)
		case '\\':
			token = l.readCharacter()
		case '(':
			token = tok.Token{Type: tok.VECTOR,
				Lexeme:  "#(",
				Literal: "#(",
				Line:    l.line,
				Column:  l.column,
			}
			l.advance() // consume '#'; the trailing advance consumes '('
		default:
			// Unknown '#' form: emit ILLEGAL for the '#' itself; the
			// following character will be re-examined on the next call.
			token = tok.Token{Type: tok.ILLEGAL,
				Lexeme:  string(l.ch),
				Literal: string(l.ch),
				Line:    l.line,
				Column:  l.column,
			}
		}
	case 0:
		// ch == 0 marks end of input (set by advance).
		token = tok.Token{
			Type:   tok.EOF,
			Line:   l.line,
			Column: l.column,
		}
	default:
		// Atom: consume characters up to the next delimiter or end of input.
		// NOTE(review): isDelimiter does not treat '"' or ';' as delimiters,
		// so e.g. `abc"x"` lexes as one atom — confirm this is intended.
		position := l.position
		for !isDelimiter(l.ch) && !l.isEOF() {
			l.advance()
		}
		text := l.input[position:l.position]
		//fmt.Printf("text: %s\n", text)

		// Try to parse as complex first, since it might contain rationals
		if t, err := l.readNumber(text); err == nil {
			token = t
		} else if t, err := l.readIdentifier(text); err == nil {
			token = t
		} else {
			token = tok.Token{
				Type:    tok.ILLEGAL,
				Lexeme:  text,
				Literal: text,
				Line:    l.line,
				Column:  l.column - 1,
			}
		}
		// The atom loop already consumed past the lexeme; do NOT fall
		// through to the trailing advance below.
		return token
	}

	// Consume the (final) character of the token built above.
	l.advance()
	return token
}

// skipSpace advances past any run of whitespace characters.
func (l *Lexer) skipSpace() {
	for {
		if !isSpace(l.ch) {
			return
		}
		l.advance()
	}
}

// isDelimiter reports whether ch terminates an atom: whitespace, parens,
// or a quote character.
// NOTE(review): '"' and ';' are not treated as delimiters here — confirm
// that is the intended atom boundary for this dialect.
func isDelimiter(ch byte) bool {
	switch ch {
	case '(', ')', '\'':
		return true
	}
	return isSpace(ch)
}

// isSpace reports whether ch is a whitespace character.
func isSpace(ch byte) bool {
	switch ch {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}

// readBoolean emits a BOOLEAN token for a two-character "#t"/"#f" form
// (case-insensitive second character, validated by the caller) and consumes
// both characters.
func (l *Lexer) readBoolean(literal bool) tok.Token {
	lexeme := l.input[l.position : l.position+2]
	token := tok.Token{
		Type:    tok.BOOLEAN,
		Lexeme:  lexeme,
		Literal: literal,
		Line:    l.line,
		Column:  l.column + 1,
	}
	l.advance() // past '#'
	l.advance() // past 't'/'f'
	return token
}
// readCharacter scans a character literal of the form #\x, including the
// named characters #\space, #\newline and #\tab.
// Precondition: l.ch == '#' and the next character is '\'.
func (l *Lexer) readCharacter() tok.Token {
	l.advance() // Skip #
	l.advance() // Skip \

	// Named characters. The first letter is the current char; the remainder
	// is matched by lookahead, so plain #\s, #\n, #\t still lex as single
	// characters when the rest of the name does not follow.
	named := []struct {
		first byte
		rest  string
		value byte
	}{
		{'s', "pace", ' '},
		{'n', "ewline", '\n'},
		{'t', "ab", '\t'},
	}
	for _, nc := range named {
		if l.ch != nc.first || l.peekString(len(nc.rest)) != nc.rest {
			continue
		}
		// Consume the rest of the name.
		for range nc.rest {
			l.advance()
		}
		token := tok.Token{
			Type:    tok.CHARACTER,
			Lexeme:  "#\\" + string(nc.first) + nc.rest,
			Literal: nc.value,
			Line:    l.line,
			Column:  l.column,
		}
		l.advance() // Move past the last character of the name
		return token
	}

	// Single-character literal.
	char := l.ch
	l.advance()
	return tok.Token{
		Type:    tok.CHARACTER,
		Lexeme:  "#\\" + string(char),
		Literal: char, // byte literal
		Line:    l.line,
		Column:  l.column - 1, // Adjust column to point to the character itself
	}
}

// peekString returns the next n characters without consuming them, or ""
// when fewer than n characters remain.
func (l *Lexer) peekString(n int) string {
	end := l.readPosition + n
	if end > len(l.input) {
		return ""
	}
	return l.input[l.readPosition:end]
}

// readNumber attempts to parse text as a numeric literal via
// arithm.ReadNumber. On failure it returns the zero Token and the parse
// error so the caller can fall back to identifier parsing.
// NOTE(review): the token Type is always tok.INTEGER regardless of what
// kind of value arithm.ReadNumber produced — confirm this is intended.
func (l *Lexer) readNumber(text string) (tok.Token, error) {
	value, err := arithm.ReadNumber(text)
	if err != nil {
		return tok.Token{}, err
	}

	token := tok.Token{
		Type:    tok.INTEGER,
		Lexeme:  text,
		Literal: value,
		Line:    l.line,
		Column:  l.column - 1, // column is already past the atom; point back at it
	}
	return token, nil
}

// readIdentifier validates text as an identifier: every byte must be a
// letter, digit, or special initial character. On success it returns an
// IDENTIFIER token; otherwise an ILLEGAL token plus a non-nil error.
// NOTE(review): empty text yields an IDENTIFIER with empty lexeme — the
// caller's atom loop should never produce that, but confirm.
func (l *Lexer) readIdentifier(text string) (tok.Token, error) {
	t := tok.IDENTIFIER
	var err error
	for i := 0; i < len(text); i++ {
		ch := text[i]
		if !isLetter(ch) && !isDigit(ch) && !isSpecialInitialChar(ch) {
			t = tok.ILLEGAL
			err = errors.New("invalid identifier format")
			// One bad character decides the outcome; the original kept
			// scanning and re-allocating the same error for every bad byte.
			break
		}
	}
	token := tok.Token{
		Type:    t,
		Lexeme:  text,
		Literal: text,
		Line:    l.line,
		Column:  l.column - 1,
	}

	return token, err
}

// readString scans a double-quoted string literal starting at the opening
// quote. The token's Literal is the raw text between the quotes; the Lexeme
// is a re-quoted display form with control characters and quotes escaped.
// NOTE(review): backslash escapes are NOT interpreted while scanning, so a
// literal containing \" terminates at that inner quote — confirm whether
// escape sequences are meant to be supported.
func (l *Lexer) readString() tok.Token {
	position := l.position
	l.advance()
	for l.ch != '"' {
		if l.isEOF() {
			// Unterminated string: return everything consumed so far as ILLEGAL.
			return tok.Token{
				Type:   tok.ILLEGAL,
				Lexeme: l.input[position:l.position],
				Line:   l.line,
				Column: l.column,
			}
		}
		l.advance()
	}
	literal := l.input[position+1 : l.position]
	// Advance past the closing quote
	l.advance()

	// Build the display lexeme: re-quote and escape special characters.
	var s strings.Builder
	s.WriteByte('"')
	for _, ch := range literal {
		switch ch {
		case '\a':
			s.WriteString("\\a")
		case '\b':
			s.WriteString("\\b")
		case '\f':
			s.WriteString("\\f")
		case '\n':
			s.WriteString("\\n")
		case '\r':
			s.WriteString("\\r")
		case '\t':
			s.WriteString("\\t")
		case '\v':
			s.WriteString("\\v")
		case '\\':
			s.WriteString(`\\`)
		case '\'':
			s.WriteString(`\'`)
		case '"':
			s.WriteString(`\"`)
		default:
			s.WriteRune(ch)
		}
	}
	s.WriteByte('"')

	return tok.Token{
		Type:    tok.STRING,
		Lexeme:  s.String(),
		Literal: literal,
		Line:    l.line,
		Column:  l.column,
	}
}
// peekChar returns the next character without consuming it, or 0 at end
// of input.
func (l *Lexer) peekChar() byte {
	if l.readPosition < len(l.input) {
		return l.input[l.readPosition]
	}
	return 0
}

// isLetter reports whether ch is an ASCII letter.
func isLetter(ch byte) bool {
	return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z')
}

// isDigit reports whether ch is an ASCII decimal digit.
func isDigit(ch byte) bool {
	return ch >= '0' && ch <= '9'
}

// isSpecialInitialChar reports whether ch is one of the extra characters
// permitted in identifiers: the special initials
// ! $ % & * / : < = > ? ~ _ ^ plus . + - (used by peculiar identifiers).
func isSpecialInitialChar(ch byte) bool {
	return strings.IndexByte("!$%&*/:<=>?~_^.+-", ch) >= 0
}

// isEOF reports whether the current position has reached the end of input.
func (l *Lexer) isEOF() bool {
	return len(l.input) <= l.position
}
// readComment scans a line comment beginning at the current ';' and ending
// just before the newline (or at end of input). The newline itself is left
// for the caller to consume; the leading ';' is included in the lexeme.
func (l *Lexer) readComment() tok.Token {
	begin := l.position

	for l.ch != '\n' && !l.isEOF() {
		l.advance()
	}

	text := l.input[begin:l.position]
	return tok.Token{
		Type:    tok.COMMENT,
		Lexeme:  text,
		Literal: text,
		Line:    l.line,
		Column:  l.column - 1,
	}
}
