package lex

import (
	"fmt"
	"math"
	"math/big"
	"math/cmplx"
	"reflect"
	"showen7/lex"
	"showen7/num"
	"showen7/tok"
	"testing"
)

// TestLexer001 verifies basic tokenization: empty input, parentheses,
// quote forms, whitespace/newline position tracking, and dotted-pair
// syntax, checking exact token types, lexemes, literals, and positions.
func TestLexer001(t *testing.T) {
	tests := []struct {
		input    string
		expected []tok.Token
	}{
		{"", []tok.Token{{Type: tok.EOF, Line: 1, Column: 1}}},
		{
			"(",
			[]tok.Token{
				{Type: tok.LPAREN, Lexeme: "(", Literal: "(", Line: 1, Column: 1},
				{Type: tok.EOF, Line: 1, Column: 2},
			},
		},
		{
			"()",
			[]tok.Token{
				{Type: tok.LPAREN, Lexeme: "(", Literal: "(", Line: 1, Column: 1},
				{Type: tok.RPAREN, Lexeme: ")", Literal: ")", Line: 1, Column: 2},
				{Type: tok.EOF, Line: 1, Column: 3},
			},
		},
		{
			"'",
			[]tok.Token{
				{Type: tok.QUOTE, Lexeme: "'", Literal: "'", Line: 1, Column: 1},
				{Type: tok.EOF, Line: 1, Column: 2},
			},
		},
		{
			"'x",
			[]tok.Token{
				{Type: tok.QUOTE, Lexeme: "'", Literal: "'", Line: 1, Column: 1},
				{Type: tok.IDENTIFIER, Lexeme: "x", Literal: "x", Line: 1, Column: 2},
				{Type: tok.EOF, Line: 1, Column: 3},
			},
		},
		{
			"'(1 2 3)",
			[]tok.Token{
				{Type: tok.QUOTE, Lexeme: "'", Literal: "'", Line: 1, Column: 1},
				{Type: tok.LPAREN, Lexeme: "(", Literal: "(", Line: 1, Column: 2},
				{Type: tok.INTEGER, Lexeme: "1", Literal: num.Integer(1), Line: 1, Column: 3},
				{Type: tok.INTEGER, Lexeme: "2", Literal: num.Integer(2), Line: 1, Column: 5},
				{Type: tok.INTEGER, Lexeme: "3", Literal: num.Integer(3), Line: 1, Column: 7},
				{Type: tok.RPAREN, Lexeme: ")", Literal: ")", Line: 1, Column: 8},
				{Type: tok.EOF, Line: 1, Column: 9},
			},
		},
		// Whitespace-only inputs: EOF position must reflect columns/lines consumed.
		{"    ", []tok.Token{{Type: tok.EOF, Line: 1, Column: 5}}},
		{"    \n", []tok.Token{{Type: tok.EOF, Line: 2, Column: 1}}},
		{"    \n   ", []tok.Token{{Type: tok.EOF, Line: 2, Column: 4}}},
		// Dotted pair: "." between spaces lexes as an identifier.
		{"'(a . b)",
			[]tok.Token{
				{Type: tok.QUOTE, Lexeme: "'", Literal: "'", Line: 1, Column: 1},
				{Type: tok.LPAREN, Lexeme: "(", Literal: "(", Line: 1, Column: 2},
				{Type: tok.IDENTIFIER, Lexeme: "a", Literal: "a", Line: 1, Column: 3},
				{Type: tok.IDENTIFIER, Lexeme: ".", Literal: ".", Line: 1, Column: 5},
				{Type: tok.IDENTIFIER, Lexeme: "b", Literal: "b", Line: 1, Column: 7},
				{Type: tok.RPAREN, Lexeme: ")", Literal: ")", Line: 1, Column: 8},
				{Type: tok.EOF, Line: 1, Column: 9},
			},
		},
	}
	for _, tt := range tests {
		l := lex.NewLexer(tt.input)
		tokens := l.ReadTokens()
		if len(tokens) != len(tt.expected) {
			t.Errorf("Input: %v, Length expected %d tokens, got %d", tt.input, len(tt.expected), len(tokens))
			// Skip element-wise comparison: indexing tt.expected[i] below
			// would panic when the lexer returned more tokens than expected.
			continue
		}
		for i, token := range tokens {
			if token != tt.expected[i] {
				t.Errorf("Input: %v, Expected %v tokens, got %v", tt.input, tt.expected, tokens)
			}
		}
	}
}

// TestLexerComment verifies that semicolon comments lex as COMMENT tokens.
// The block-commented cases below are disabled pending lexer support;
// note the comment opened before the first disabled case is closed by
// the final "*/" so they remain a single disabled region.
func TestLexerComment(t *testing.T) {
	tests := []struct {
		input    string
		expected []tok.Token
	}{
		{";a",
			[]tok.Token{
				{Type: tok.COMMENT, Lexeme: ";a", Literal: ";a", Line: 1, Column: 2},
				{Type: tok.EOF, Line: 1, Column: 3},
			}},
		/*{";(this is a comment)",
			[]tok.Token{
				{Type: tok.EOF, Line: 1, Column: 21},
				{Type: tok.EOF, Line: 1, Column: 9},
			}},
		/*{"(define x 1); comment", []tok.Token{
			{Type: tok.LPAREN, Lexeme: "(", Literal: "(", Line: 1, Column: 1},
			{Type: tok.IDENTIFIER, Lexeme: "define", Literal: "define", Line: 1, Column: 2},
			{Type: tok.IDENTIFIER, Lexeme: "x", Literal: "x", Line: 1, Column: 9},
			{Type: tok.INTEGER, Lexeme: "1", Literal: num.Integer(1), Line: 1, Column: 11},
			{Type: tok.RPAREN, Lexeme: ")", Literal: ")", Line: 1, Column: 12},
			{Type: tok.COMMENT, Lexeme: ";(this is a comment)", Literal: ";(this is a comment)", Line: 1, Column: 13},
			{Type: tok.EOF, Line: 1, Column: 21},
		}},
			{"(define x ; comment\n1)", []tok.Token{
				{Type: tok.LPAREN, Lexeme: "(", Literal: "(", Line: 1, Column: 1},
				{Type: tok.IDENTIFIER, Lexeme: "define", Literal: "define", Line: 1, Column: 7},
				{Type: tok.IDENTIFIER, Lexeme: "x", Literal: "x", Line: 1, Column: 9},
				{Type: tok.INTEGER, Lexeme: "1", Literal: num.Integer(1), Line: 2, Column: 1},
				{Type: tok.RPAREN, Lexeme: ")", Literal: ")", Line: 2, Column: 2},
				{Type: tok.EOF, Line: 2, Column: 3},
			}},
			{";; multiple semicolons", []tok.Token{{Type: tok.EOF, Line: 1, Column: 23}}},
			{"(+ 2 ; comment\n 3)", []tok.Token{
				{Type: tok.LPAREN, Lexeme: "(", Literal: "(", Line: 1, Column: 1},
				{Type: tok.IDENTIFIER, Lexeme: "+", Literal: "+", Line: 1, Column: 2},
				{Type: tok.INTEGER, Lexeme: "2", Literal: num.Integer(2), Line: 1, Column: 4},
				{Type: tok.INTEGER, Lexeme: "3", Literal: num.Integer(3), Line: 2, Column: 2},
				{Type: tok.RPAREN, Lexeme: ")", Literal: ")", Line: 2, Column: 3},
				{Type: tok.EOF, Line: 2, Column: 4},
			}},
			{"; comment line 1\n; comment line 2\n(define x 1)", []tok.Token{
				{Type: tok.LPAREN, Lexeme: "(", Literal: "(", Line: 3, Column: 1},
				{Type: tok.IDENTIFIER, Lexeme: "define", Literal: "define", Line: 3, Column: 7},
				{Type: tok.IDENTIFIER, Lexeme: "x", Literal: "x", Line: 3, Column: 9},
				{Type: tok.INTEGER, Lexeme: "1", Literal: num.Integer(1), Line: 3, Column: 11},
				{Type: tok.RPAREN, Lexeme: ")", Literal: ")", Line: 3, Column: 12},
				{Type: tok.EOF, Line: 3, Column: 13},
			}},//*/
	}
	for _, tt := range tests {
		l := lex.NewLexer(tt.input)
		tokens := l.ReadTokens()
		if len(tokens) != len(tt.expected) {
			t.Errorf("Input: %v, Length expected %d tokens, got %d", tt.input, len(tt.expected), len(tokens))
			// Skip element-wise comparison: indexing tt.expected[i] below
			// would panic when the lexer returned more tokens than expected.
			continue
		}
		for i, token := range tokens {
			if token != tt.expected[i] {
				t.Errorf("Input: %v, Expected %v tokens, got %v", tt.input, tt.expected, tokens)
			}
		}
	}
}

// TestLexer002 exercises character-literal tokenization via NextToken,
// one subtest per literal: single characters (#\a, #\A, #\5) and the
// named characters #\space, #\newline, and #\tab.
//
// NOTE: the original contained two copies of the single-character cases
// (one bare loop, one under a "character_literals" subtest); they have
// been merged into a single table so each case runs exactly once.
func TestLexer002(t *testing.T) {
	charTests := []struct {
		name     string
		input    string
		expected tok.Token
	}{
		{
			name:  "character a",
			input: "#\\a",
			expected: tok.Token{
				Type:    tok.CHARACTER,
				Lexeme:  "#\\a",
				Literal: byte('a'),
				Line:    1,
				Column:  3,
			},
		},
		{
			name:  "character A",
			input: "#\\A",
			expected: tok.Token{
				Type:    tok.CHARACTER,
				Lexeme:  "#\\A",
				Literal: byte('A'),
				Line:    1,
				Column:  3,
			},
		},
		{
			name:  "character 5",
			input: "#\\5",
			expected: tok.Token{
				Type:    tok.CHARACTER,
				Lexeme:  "#\\5",
				Literal: byte('5'),
				Line:    1,
				Column:  3,
			},
		},
		{
			name:  "character space",
			input: "#\\space",
			expected: tok.Token{
				Type:    tok.CHARACTER,
				Lexeme:  "#\\space",
				Literal: byte(' '),
				Line:    1,
				Column:  7,
			},
		},
		{
			name:  "character newline",
			input: "#\\newline",
			expected: tok.Token{
				Type:    tok.CHARACTER,
				Lexeme:  "#\\newline",
				Literal: byte('\n'),
				Line:    1,
				Column:  9,
			},
		},
		{
			name:  "character tab",
			input: "#\\tab",
			expected: tok.Token{
				Type:    tok.CHARACTER,
				Lexeme:  "#\\tab",
				Literal: byte('\t'),
				Line:    1,
				Column:  5,
			},
		},
	}

	for _, tt := range charTests {
		t.Run(tt.name, func(t *testing.T) {
			l := lex.NewLexer(tt.input)
			token := l.NextToken()

			// Compare each field individually for precise failure messages.
			if token.Type != tt.expected.Type {
				t.Errorf("TokenType mismatch. Expected %v, got %v", tt.expected.Type, token.Type)
			}

			if token.Lexeme != tt.expected.Lexeme {
				t.Errorf("Lexeme mismatch. Expected %v, got %v", tt.expected.Lexeme, token.Lexeme)
			}

			if token.Literal != tt.expected.Literal {
				t.Errorf("Literal mismatch. Expected %v, got %v", tt.expected.Literal, token.Literal)
			}

			if token.Line != tt.expected.Line {
				t.Errorf("Line mismatch. Expected %v, got %v", tt.expected.Line, token.Line)
			}

			if token.Column != tt.expected.Column {
				t.Errorf("Column mismatch. Expected %v, got %v", tt.expected.Column, token.Column)
			}
		})
	}
}

// TestLexerBoolean verifies #t/#f boolean literals in both cases (#T, #F)
// and that adjacent booleans ("#t#f") lex as two tokens.
func TestLexerBoolean(t *testing.T) {
	tests := []struct {
		input    string
		expected []tok.Token
	}{
		{
			"#t",
			[]tok.Token{
				{Type: tok.BOOLEAN, Lexeme: "#t", Literal: true, Line: 1, Column: 2},
				{Type: tok.EOF, Line: 1, Column: 3},
			},
		},
		{
			"#t #F",
			[]tok.Token{
				{Type: tok.BOOLEAN, Lexeme: "#t", Literal: true, Line: 1, Column: 2},
				{Type: tok.BOOLEAN, Lexeme: "#F", Literal: false, Line: 1, Column: 5},
				{Type: tok.EOF, Line: 1, Column: 6},
			},
		},
		{
			"#T #f",
			[]tok.Token{
				{Type: tok.BOOLEAN, Lexeme: "#T", Literal: true, Line: 1, Column: 2},
				{Type: tok.BOOLEAN, Lexeme: "#f", Literal: false, Line: 1, Column: 5},
				{Type: tok.EOF, Line: 1, Column: 6},
			},
		},
		{
			"#t#f",
			[]tok.Token{
				{Type: tok.BOOLEAN, Lexeme: "#t", Literal: true, Line: 1, Column: 2},
				{Type: tok.BOOLEAN, Lexeme: "#f", Literal: false, Line: 1, Column: 4},
				{Type: tok.EOF, Line: 1, Column: 5},
			},
		},
	}
	for _, tt := range tests {
		l := lex.NewLexer(tt.input)
		tokens := l.ReadTokens()
		if len(tokens) != len(tt.expected) {
			t.Errorf("Input: %v, Length expected %d tokens, got %d", tt.input, len(tt.expected), len(tokens))
			// Skip element-wise comparison: indexing tt.expected[i] below
			// would panic when the lexer returned more tokens than expected.
			continue
		}
		for i, token := range tokens {
			if token != tt.expected[i] {
				t.Errorf("Input: %v, Expected %v tokens, got %v", tt.input, tt.expected, tokens)
			}
		}
	}

}

// TestLexerCharacter verifies #\<char> literals through ReadTokens:
// plain characters, named characters (#\space, #\newline), #\# itself,
// a literal space (#\ ), and several literals on one line.
func TestLexerCharacter(t *testing.T) {
	tests := []struct {
		input    string
		expected []tok.Token
	}{
		{
			"#\\A",
			[]tok.Token{
				{Type: tok.CHARACTER, Lexeme: "#\\A", Literal: byte('A'), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"#\\space",
			[]tok.Token{
				{Type: tok.CHARACTER, Lexeme: "#\\space", Literal: byte(' '), Line: 1, Column: 7},
				{Type: tok.EOF, Line: 1, Column: 8},
			},
		},
		{
			"#\\newline",
			[]tok.Token{
				{Type: tok.CHARACTER, Lexeme: "#\\newline", Literal: byte('\n'), Line: 1, Column: 9},
				{Type: tok.EOF, Line: 1, Column: 10},
			},
		},
		{
			"#\\z",
			[]tok.Token{
				{Type: tok.CHARACTER, Lexeme: "#\\z", Literal: byte('z'), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"#\\1",
			[]tok.Token{
				{Type: tok.CHARACTER, Lexeme: "#\\1", Literal: byte('1'), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"#\\#",
			[]tok.Token{
				{Type: tok.CHARACTER, Lexeme: "#\\#", Literal: byte('#'), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"#\\ ",
			[]tok.Token{
				{Type: tok.CHARACTER, Lexeme: "#\\ ", Literal: byte(' '), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"#\\a #\\B #\\5",
			[]tok.Token{
				{Type: tok.CHARACTER, Lexeme: "#\\a", Literal: byte('a'), Line: 1, Column: 3},
				{Type: tok.CHARACTER, Lexeme: "#\\B", Literal: byte('B'), Line: 1, Column: 7},
				{Type: tok.CHARACTER, Lexeme: "#\\5", Literal: byte('5'), Line: 1, Column: 11},
				{Type: tok.EOF, Line: 1, Column: 12},
			},
		},
	}
	for _, tt := range tests {
		l := lex.NewLexer(tt.input)
		tokens := l.ReadTokens()
		if len(tokens) != len(tt.expected) {
			t.Errorf("Input: %v, Length expected %d tokens, got %d", tt.input, len(tt.expected), len(tokens))
			// Skip element-wise comparison: indexing tt.expected[i] below
			// would panic when the lexer returned more tokens than expected.
			continue
		}
		for i, token := range tokens {
			if !reflect.DeepEqual(token, tt.expected[i]) {
				want := tt.expected[i]
				if want.Literal != token.Literal {
					// Debug aid for literal mismatches; %v (not %t) because
					// Literal is interface{} and usually holds a byte.
					fmt.Printf("Literal %v!=%v\n", want.Literal, token.Literal)
				}
				t.Errorf("Input: %v, Expected tokens %v, got %v", tt.input, tt.expected[i], token)
			}
		}
	}

}

// TestLexerInteger verifies integer literals: positive/negative, zero,
// int64 extremes, leading zeros, several integers per line, and inputs
// that look numeric but must lex as identifiers ("12a", "--123").
func TestLexerInteger(t *testing.T) {
	tests := []struct {
		input    string
		expected []tok.Token
	}{
		{
			"1",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "1", Literal: num.Integer(1), Line: 1, Column: 1},
				{Type: tok.EOF, Line: 1, Column: 2},
			},
		},
		{
			"123",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "123", Literal: num.Integer(123), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"-456",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "-456", Literal: num.Integer(-456), Line: 1, Column: 4},
				{Type: tok.EOF, Line: 1, Column: 5},
			},
		},
		{
			"0",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "0", Literal: num.Integer(0), Line: 1, Column: 1},
				{Type: tok.EOF, Line: 1, Column: 2},
			},
		},
		{
			"-0",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "-0", Literal: num.Integer(0), Line: 1, Column: 2},
				{Type: tok.EOF, Line: 1, Column: 3},
			},
		},
		// int64 boundary values.
		{
			"9223372036854775807",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "9223372036854775807", Literal: num.Integer(9223372036854775807), Line: 1, Column: 19},
				{Type: tok.EOF, Line: 1, Column: 20},
			},
		},
		{
			"-9223372036854775808",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "-9223372036854775808", Literal: num.Integer(-9223372036854775808), Line: 1, Column: 20},
				{Type: tok.EOF, Line: 1, Column: 21},
			},
		},
		{
			"123 -456 789",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "123", Literal: num.Integer(123), Line: 1, Column: 3},
				{Type: tok.INTEGER, Lexeme: "-456", Literal: num.Integer(-456), Line: 1, Column: 8},
				{Type: tok.INTEGER, Lexeme: "789", Literal: num.Integer(789), Line: 1, Column: 12},
				{Type: tok.EOF, Line: 1, Column: 13},
			},
		},
		// Test cases for invalid integers that should not be parsed as integers
		{
			"01",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "01", Literal: num.Integer(1), Line: 1, Column: 2},
				{Type: tok.EOF, Line: 1, Column: 3},
			},
		},
		{
			"12a",
			[]tok.Token{
				{Type: tok.IDENTIFIER, Lexeme: "12a", Literal: "12a", Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"--123",
			[]tok.Token{
				{Type: tok.IDENTIFIER, Lexeme: "--123", Literal: "--123", Line: 1, Column: 5},
				{Type: tok.EOF, Line: 1, Column: 6},
			},
		},
	}
	for _, tt := range tests {
		l := lex.NewLexer(tt.input)
		tokens := l.ReadTokens()
		if len(tokens) != len(tt.expected) {
			t.Errorf("Input: %v, Length expected %d tokens, got %d", tt.input, len(tt.expected), len(tokens))
			// Skip element-wise comparison: indexing tt.expected[i] below
			// would panic when the lexer returned more tokens than expected.
			continue
		}
		for i, token := range tokens {
			if !reflect.DeepEqual(token, tt.expected[i]) {
				t.Errorf("Input: %v, Expected tokens %v, got %v", tt.input, tt.expected[i], token)
			}
		}
	}

}

// TestLexerReal verifies real-number literals: plain decimals, signed
// values, leading-dot forms (.5, -.5), exponent notation (1e10, 1.5e-3,
// -2.5E+3), and malformed inputs that must lex as identifiers.
func TestLexerReal(t *testing.T) {
	tests := []struct {
		input    string
		expected []tok.Token
	}{
		{
			"1.0",
			[]tok.Token{
				{Type: tok.REAL, Lexeme: "1.0", Literal: num.Real(1.0), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"123.456",
			[]tok.Token{
				{Type: tok.REAL, Lexeme: "123.456", Literal: num.Real(123.456), Line: 1, Column: 7},
				{Type: tok.EOF, Line: 1, Column: 8},
			},
		},
		{
			"-456.789",
			[]tok.Token{
				{Type: tok.REAL, Lexeme: "-456.789", Literal: num.Real(-456.789), Line: 1, Column: 8},
				{Type: tok.EOF, Line: 1, Column: 9},
			},
		},
		{
			"0.0",
			[]tok.Token{
				{Type: tok.REAL, Lexeme: "0.0", Literal: num.Real(0.0), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"-0.0",
			[]tok.Token{
				{Type: tok.REAL, Lexeme: "-0.0", Literal: num.Real(0.0), Line: 1, Column: 4},
				{Type: tok.EOF, Line: 1, Column: 5},
			},
		},
		{
			".5",
			[]tok.Token{
				{Type: tok.REAL, Lexeme: ".5", Literal: num.Real(0.5), Line: 1, Column: 2},
				{Type: tok.EOF, Line: 1, Column: 3},
			},
		},
		{
			"-.5",
			[]tok.Token{
				{Type: tok.REAL, Lexeme: "-.5", Literal: num.Real(-0.5), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"123.456 -789.012 345.678",
			[]tok.Token{
				{Type: tok.REAL, Lexeme: "123.456", Literal: num.Real(123.456), Line: 1, Column: 7},
				{Type: tok.REAL, Lexeme: "-789.012", Literal: num.Real(-789.012), Line: 1, Column: 16},
				{Type: tok.REAL, Lexeme: "345.678", Literal: num.Real(345.678), Line: 1, Column: 24},
				{Type: tok.EOF, Line: 1, Column: 25},
			},
		},
		// Exponent notation in both cases, with optional signs.
		{
			"1e10",
			[]tok.Token{
				{Type: tok.REAL, Lexeme: "1e10", Literal: num.Real(1e10), Line: 1, Column: 4},
				{Type: tok.EOF, Line: 1, Column: 5},
			},
		},
		{
			"1.5e-3",
			[]tok.Token{
				{Type: tok.REAL, Lexeme: "1.5e-3", Literal: num.Real(1.5e-3), Line: 1, Column: 6},
				{Type: tok.EOF, Line: 1, Column: 7},
			},
		},
		{
			"-2.5E+3",
			[]tok.Token{
				{Type: tok.REAL, Lexeme: "-2.5E+3", Literal: num.Real(-2.5e3), Line: 1, Column: 7},
				{Type: tok.EOF, Line: 1, Column: 8},
			},
		},
		// Test cases for invalid reals that should not be parsed as reals
		{
			"1.2a",
			[]tok.Token{
				{Type: tok.IDENTIFIER, Lexeme: "1.2a", Literal: "1.2a", Line: 1, Column: 4},
				{Type: tok.EOF, Line: 1, Column: 5},
			},
		},
		{
			"..123",
			[]tok.Token{
				{Type: tok.IDENTIFIER, Lexeme: "..123", Literal: "..123", Line: 1, Column: 5},
				{Type: tok.EOF, Line: 1, Column: 6},
			},
		},
		{
			"1.2.3",
			[]tok.Token{
				{Type: tok.IDENTIFIER, Lexeme: "1.2.3", Literal: "1.2.3", Line: 1, Column: 5},
				{Type: tok.EOF, Line: 1, Column: 6},
			},
		},
	}
	for _, tt := range tests {
		l := lex.NewLexer(tt.input)
		tokens := l.ReadTokens()
		if len(tokens) != len(tt.expected) {
			t.Errorf("Input: %v, Length expected %d tokens, got %d", tt.input, len(tt.expected), len(tokens))
			// Skip element-wise comparison: indexing tt.expected[i] below
			// would panic when the lexer returned more tokens than expected.
			continue
		}
		for i, token := range tokens {
			if !reflect.DeepEqual(token, tt.expected[i]) {
				t.Errorf("Input: %v, Expected tokens %v, got %v", tt.input, tt.expected[i], token)
			}
		}
	}
}

// TestLexerRational verifies rational literals (n/d): proper rationals,
// rationals that reduce to integers (5/1, 6/3), reduction to lowest
// terms (123/456 -> 41/152), and malformed forms ("1/", "/2", "1/0")
// that must lex as identifiers.
func TestLexerRational(t *testing.T) {
	tests := []struct {
		input    string
		expected []tok.Token
	}{
		{
			"1/2",
			[]tok.Token{
				{Type: tok.RATIONAL, Lexeme: "1/2", Literal: NewRational(1, 2), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"-3/4",
			[]tok.Token{
				{Type: tok.RATIONAL, Lexeme: "-3/4", Literal: NewRational(-3, 4), Line: 1, Column: 4},
				{Type: tok.EOF, Line: 1, Column: 5},
			},
		},
		{
			"5/1",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "5/1", Literal: num.Integer(5), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"0/1",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "0/1", Literal: num.Integer(0), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"6/3",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "6/3", Literal: num.Integer(2), Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
		{
			"-8/2",
			[]tok.Token{
				{Type: tok.INTEGER, Lexeme: "-8/2", Literal: num.Integer(-4), Line: 1, Column: 4},
				{Type: tok.EOF, Line: 1, Column: 5},
			},
		},
		{
			"123/456",
			[]tok.Token{
				{Type: tok.RATIONAL, Lexeme: "123/456", Literal: NewRational(41, 152), Line: 1, Column: 7},
				{Type: tok.EOF, Line: 1, Column: 8},
			},
		},
		{
			"-9/10",
			[]tok.Token{
				{Type: tok.RATIONAL, Lexeme: "-9/10", Literal: NewRational(-9, 10), Line: 1, Column: 5},
				{Type: tok.EOF, Line: 1, Column: 6},
			},
		},
		{
			"1/2 3/4 5/1",
			[]tok.Token{
				{Type: tok.RATIONAL, Lexeme: "1/2", Literal: NewRational(1, 2), Line: 1, Column: 3},
				{Type: tok.RATIONAL, Lexeme: "3/4", Literal: NewRational(3, 4), Line: 1, Column: 7},
				{Type: tok.INTEGER, Lexeme: "5/1", Literal: num.Integer(5), Line: 1, Column: 11},
				{Type: tok.EOF, Line: 1, Column: 12},
			},
		},
		// Test cases for invalid rationals that should not be parsed as rationals
		{
			"1/",
			[]tok.Token{
				{Type: tok.IDENTIFIER, Lexeme: "1/", Literal: "1/", Line: 1, Column: 2},
				{Type: tok.EOF, Line: 1, Column: 3},
			},
		},
		{
			"/2",
			[]tok.Token{
				{Type: tok.IDENTIFIER, Lexeme: "/2", Literal: "/2", Line: 1, Column: 2},
				{Type: tok.EOF, Line: 1, Column: 3},
			},
		},
		{
			"1/2a",
			[]tok.Token{
				{Type: tok.IDENTIFIER, Lexeme: "1/2a", Literal: "1/2a", Line: 1, Column: 4},
				{Type: tok.EOF, Line: 1, Column: 5},
			},
		},
		{
			"1//2",
			[]tok.Token{
				{Type: tok.IDENTIFIER, Lexeme: "1//2", Literal: "1//2", Line: 1, Column: 4},
				{Type: tok.EOF, Line: 1, Column: 5},
			},
		},
		{
			"1/0",
			[]tok.Token{
				{Type: tok.IDENTIFIER, Lexeme: "1/0", Literal: "1/0", Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 4},
			},
		},
	}
	for _, tc := range tests {
		got := lex.NewLexer(tc.input).ReadTokens()
		if len(got) != len(tc.expected) {
			t.Errorf("Input: %v, Length expected %d tokens, got %d", tc.input, len(tc.expected), len(got))
			continue
		}
		// Deep comparison: Literal holds num.Rational values, which are
		// not comparable with ==.
		for i := range got {
			if !reflect.DeepEqual(got[i], tc.expected[i]) {
				t.Errorf("Input: %v\nExpected tokens[%d]: %#v\nGot: %#v", tc.input, i, tc.expected[i], got[i])
			}
		}
	}
}

// TestLexerComplex verifies complex-number literals. A bare "i" must lex
// as an identifier, while forms with a real and/or imaginary part
// ("1+2i", "3i", "+i", "-i", "1/2+3/4i") must lex as COMPLEX tokens.
// Complex literals are compared via compareComplex (epsilon-based)
// rather than exact equality.
func TestLexerComplex(t *testing.T) {
	// Test that standalone "i" is parsed as an identifier
	l := lex.NewLexer("i")
	tokens := l.ReadTokens()
	if len(tokens) != 2 {
		t.Fatalf("Expected 2 tokens, got %d", len(tokens))
	}

	if tokens[0].Type != tok.IDENTIFIER {
		t.Errorf("Expected first token to be IDENTIFIER, got %v", tokens[0].Type)
	}

	if tokens[0].Lexeme != "i" {
		t.Errorf("Expected first token lexeme to be 'i', got %v", tokens[0].Lexeme)
	}

	if tokens[0].Literal != "i" {
		t.Errorf("Expected first token literal to be 'i', got %v", tokens[0].Literal)
	}

	if tokens[1].Type != tok.EOF {
		t.Errorf("Expected second token to be EOF, got %v", tokens[1].Type)
	}

	// Test that "1+i" is parsed as a complex number
	l = lex.NewLexer("1+i")
	tokens = l.ReadTokens()
	if len(tokens) != 2 {
		t.Fatalf("Expected 2 tokens for '1+i', got %d", len(tokens))
	}

	if tokens[0].Type != tok.COMPLEX {
		t.Errorf("Expected first token to be COMPLEX, got %v", tokens[0].Type)
	}

	if tokens[0].Lexeme != "1+i" {
		t.Errorf("Expected first token lexeme to be '1+i', got %v", tokens[0].Lexeme)
	}

	// Test that "1-i" is parsed as a complex number
	l = lex.NewLexer("1-i")
	tokens = l.ReadTokens()
	if len(tokens) != 2 {
		t.Fatalf("Expected 2 tokens for '1-i', got %d", len(tokens))
	}

	if tokens[0].Type != tok.COMPLEX {
		t.Errorf("Expected first token to be COMPLEX, got %v", tokens[0].Type)
	}

	if tokens[0].Lexeme != "1-i" {
		t.Errorf("Expected first token lexeme to be '1-i', got %v", tokens[0].Lexeme)
	}

	// Table-driven cases. Expected tokens omit Line/Column because only
	// Type, Lexeme, and Literal are checked below.
	tests := []struct {
		input    string
		expected []tok.Token
	}{
		{
			"1+2i",
			[]tok.Token{
				{Type: tok.COMPLEX, Lexeme: "1+2i", Literal: createComplex(num.Integer(1), num.Integer(2))},
				{Type: tok.EOF},
			},
		},
		{
			"1+i",
			[]tok.Token{
				{Type: tok.COMPLEX, Lexeme: "1+i", Literal: createComplex(num.Integer(1), num.Integer(1))},
				{Type: tok.EOF},
			},
		},
		{
			"1-i",
			[]tok.Token{
				{Type: tok.COMPLEX, Lexeme: "1-i", Literal: createComplex(num.Integer(1), num.Integer(-1))},
				{Type: tok.EOF},
			},
		},
		{
			"3-4i",
			[]tok.Token{
				{Type: tok.COMPLEX, Lexeme: "3-4i", Literal: createComplex(num.Integer(3), num.Integer(-4))},
				{Type: tok.EOF},
			},
		},
		{
			"-1+1.5i",
			[]tok.Token{
				{Type: tok.COMPLEX, Lexeme: "-1+1.5i", Literal: createComplex(num.Integer(-1), num.Real(1.5))},
				{Type: tok.EOF},
			},
		},
		{
			"2.5e3-1i",
			[]tok.Token{
				{Type: tok.COMPLEX, Lexeme: "2.5e3-1i", Literal: createComplex(num.Real(2500.0), num.Integer(-1))},
				{Type: tok.EOF},
			},
		},
		{
			"3i",
			[]tok.Token{
				{Type: tok.COMPLEX, Lexeme: "3i", Literal: createComplex(num.Integer(0), num.Integer(3))},
				{Type: tok.EOF},
			},
		},
		{
			"-2.5i",
			[]tok.Token{
				{Type: tok.COMPLEX, Lexeme: "-2.5i", Literal: createComplex(num.Integer(0), num.Real(-2.5))},
				{Type: tok.EOF},
			},
		},
		{
			"i",
			[]tok.Token{
				{Type: tok.IDENTIFIER, Lexeme: "i", Literal: "i"},
				{Type: tok.EOF},
			},
		},
		{
			"+i",
			[]tok.Token{
				{Type: tok.COMPLEX, Lexeme: "+i", Literal: createComplex(num.Integer(0), num.Integer(1))},
				{Type: tok.EOF},
			},
		},
		{
			"-i",
			[]tok.Token{
				{Type: tok.COMPLEX, Lexeme: "-i", Literal: createComplex(num.Integer(0), num.Integer(-1))},
				{Type: tok.EOF},
			},
		},
		// Test rational complex number 1/2+3/4i
		{
			"1/2+3/4i",
			[]tok.Token{
				{Type: tok.COMPLEX, Lexeme: "1/2+3/4i", Literal: createComplex(NewRational(1, 2), NewRational(3, 4))},
				{Type: tok.EOF},
			},
		},
	}
	for _, tt := range tests {
		l := lex.NewLexer(tt.input)
		tokens := l.ReadTokens()
		if len(tokens) != len(tt.expected) {
			t.Errorf("Input: %v, Length expected %d tokens, got %d", tt.input, len(tt.expected), len(tokens))
			continue
		}
		for i, token := range tokens {
			// Check type
			if token.Type != tt.expected[i].Type {
				t.Errorf("Input: %v, Token %d: Expected type %v, got %v", tt.input, i, tt.expected[i].Type, token.Type)
				continue
			}

			// Check lexeme
			if token.Lexeme != tt.expected[i].Lexeme && tt.expected[i].Lexeme != "" {
				t.Errorf("Input: %v, Token %d: Expected lexeme %v, got %v", tt.input, i, tt.expected[i].Lexeme, token.Lexeme)
				continue
			}

			// Check literal for non-EOF tokens
			if token.Type != tok.EOF && tt.expected[i].Type != tok.EOF {
				// For complex numbers, we need to check the components separately
				if token.Type == tok.COMPLEX {
					actualComplex, ok := token.Literal.(num.Complex)
					if !ok {
						t.Errorf("Input: %v, Token %d: Expected Complex literal, got %T", tt.input, i, token.Literal)
						continue
					}

					expectedComplex, ok := tt.expected[i].Literal.(num.Complex)
					if !ok {
						t.Errorf("Input: %v, Test setup error: Expected Complex literal in test", tt.input)
						continue
					}

					// Since we can't access RealPart and ImagPart directly,
					// we'll use our compareComplex function
					if !compareComplex(actualComplex, expectedComplex) {
						t.Errorf("Input: %v, Token %d: Expected complex %v, got %v", tt.input, i, expectedComplex, actualComplex)
						continue
					}
				} else if !reflect.DeepEqual(token.Literal, tt.expected[i].Literal) {
					t.Errorf("Input: %v, Token %d: Expected literal %v, got %v", tt.input, i, tt.expected[i].Literal, token.Literal)
					continue
				}
			}
		}
	}
}

// compareNumbers reports whether a and b are equal numbers of the same
// concrete num type. Pairs of different concrete types (e.g. Integer vs
// Real) always compare false. Real and Complex values are compared with
// an epsilon of 1e-9 to tolerate floating-point rounding.
func compareNumbers(a, b num.Number) bool {
	// Bind the switched value so each case gets a typed variable,
	// avoiding the redundant a.(T) re-assertions of the original.
	switch av := a.(type) {
	case num.Integer:
		if bv, ok := b.(num.Integer); ok {
			return av == bv
		}
	case num.BigInteger:
		if bv, ok := b.(num.BigInteger); ok {
			return (*big.Int)(&av).Cmp((*big.Int)(&bv)) == 0
		}
	case num.Rational:
		if bv, ok := b.(num.Rational); ok {
			return (*big.Rat)(&av).Cmp((*big.Rat)(&bv)) == 0
		}
	case num.Real:
		if bv, ok := b.(num.Real); ok {
			// Epsilon comparison for floating point numbers.
			return math.Abs(float64(av)-float64(bv)) < 1e-9
		}
	case num.Complex:
		if bv, ok := b.(num.Complex); ok {
			return cmplx.Abs(complex128(av)-complex128(bv)) < 1e-9
		}
	}
	return false
}

// compareComplex reports whether two num.Complex values agree to within
// 1e-9, comparing via complex128 since the component parts of
// num.Complex are not directly accessible.
func compareComplex(actual, expected num.Complex) bool {
	diff := complex128(actual) - complex128(expected)
	return cmplx.Abs(diff) < 1e-9
}

// TestLexerString verifies string literals: plain, empty, with spaces,
// multi-line (embedded newline advances the reported line), Unicode,
// special symbols, and digits inside strings.
func TestLexerString(t *testing.T) {
	tests := []struct {
		input    string
		expected []tok.Token
	}{
		{
			`"hello"`,
			[]tok.Token{
				{Type: tok.STRING, Lexeme: `"hello"`, Literal: "hello", Line: 1, Column: 8},
				{Type: tok.EOF, Line: 1, Column: 8},
			},
		},
		// Empty string
		{
			`""`,
			[]tok.Token{
				{Type: tok.STRING, Lexeme: `""`, Literal: "", Line: 1, Column: 3},
				{Type: tok.EOF, Line: 1, Column: 3},
			},
		},
		// String with spaces
		{
			`"hello world"`,
			[]tok.Token{
				{Type: tok.STRING, Lexeme: `"hello world"`, Literal: "hello world", Line: 1, Column: 14},
				{Type: tok.EOF, Line: 1, Column: 14},
			},
		},
		// Multi-line strings with actual newlines
		{
			"\"line1\nline2\"",
			[]tok.Token{
				{Type: tok.STRING, Lexeme: "\"line1\\nline2\"", Literal: "line1\nline2", Line: 2, Column: 7},
				{Type: tok.EOF, Line: 2, Column: 7},
			},
		},
		// Unicode characters
		{
			`"日本語"`,
			[]tok.Token{
				{Type: tok.STRING, Lexeme: `"日本語"`, Literal: "日本語", Line: 1, Column: 12},
				{Type: tok.EOF, Line: 1, Column: 12},
			},
		},
		// Special symbols
		{
			`"special symbols !@#$%^&*()"`,
			[]tok.Token{
				{Type: tok.STRING, Lexeme: `"special symbols !@#$%^&*()"`, Literal: "special symbols !@#$%^&*()", Line: 1, Column: 29},
				{Type: tok.EOF, Line: 1, Column: 29},
			},
		},
		// String with numbers
		{
			`"123 numbers"`,
			[]tok.Token{
				{Type: tok.STRING, Lexeme: `"123 numbers"`, Literal: "123 numbers", Line: 1, Column: 14},
				{Type: tok.EOF, Line: 1, Column: 14},
			},
		},
	}
	for _, tc := range tests {
		got := lex.NewLexer(tc.input).ReadTokens()
		if len(got) != len(tc.expected) {
			t.Errorf("Input: %v, Length expected %d tokens, got %d", tc.input, len(tc.expected), len(got))
			continue
		}
		for i := range got {
			if !reflect.DeepEqual(got[i], tc.expected[i]) {
				t.Errorf("Input: %v\nExpected token[%d]: %#v\nGot: %#v", tc.input, i, tc.expected[i], got[i])
			}
		}
	}
}

// createComplex builds a num.Complex expected value from real and
// imaginary parts given as num.Integer, num.Real, or num.Rational.
// Any other part type contributes 0 to that component.
func createComplex(realPart, imagPart interface{}) num.Complex {
	// toFloat converts a supported num type to float64 (0 otherwise).
	toFloat := func(part interface{}) float64 {
		switch v := part.(type) {
		case num.Integer:
			return float64(v)
		case num.Real:
			return float64(v)
		case num.Rational:
			f, _ := (*big.Rat)(&v).Float64()
			return f
		}
		return 0
	}

	return num.Complex(complex(toFloat(realPart), toFloat(imagPart)))
}

// NewRational returns the num.Rational n/den, reduced to lowest terms
// by big.NewRat. den must be non-zero (big.NewRat panics otherwise).
func NewRational(n, den int64) num.Rational {
	return num.Rational(*big.NewRat(n, den))
}
