package lex

import (
	"testing"

	"gitcode.com/deyiyangyang/gascheme/tok"
)

// TokenTest describes one expected token from the lexer: the token
// type and the exact lexeme text the lexer should report for it.
// It is shared by all table-driven tests in this file.
type TokenTest struct {
	expectedType   tok.TokenType  // expected token category (e.g. tok.SYMBOL, tok.INT)
	expectedLexeme string         // expected raw lexeme text for the token
}

// TestNextToken drives the lexer over a sample program exercising
// comments, parens, integers, reals, rationals, strings, characters,
// booleans, quote, and the full range of symbol characters, and checks
// the emitted token stream against the expected sequence.
func TestNextToken(t *testing.T) {
	input := `
  ; This is a comment
  (define x 10)
  (define y -20.5)
  (define z 3/4)
  (define str "hello world")
  (define char #\a)
  (define bool #t)
  (define bool2 #f)
  'symbol
  (+ 1 2)
  (<= 3 4)
  (> 5 6)
  (= 7 7)
  (? ! $ % & * / : ~ _ ^)
  `

	lexer := NewLexer(input)

	// Expected token stream, one entry per token in reading order.
	expected := []TokenTest{
		{tok.LPAREN, "("},
		{tok.SYMBOL, "define"},
		{tok.SYMBOL, "x"},
		{tok.INT, "10"},
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "define"},
		{tok.SYMBOL, "y"},
		{tok.REAL, "-20.5"},
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "define"},
		{tok.SYMBOL, "z"},
		{tok.RATIONAL, "3/4"},
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "define"},
		{tok.SYMBOL, "str"},
		{tok.STRING, "hello world"},
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "define"},
		{tok.SYMBOL, "char"},
		{tok.CHAR, "#\\a"},
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "define"},
		{tok.SYMBOL, "bool"},
		{tok.BOOLEAN, "#t"},
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "define"},
		{tok.SYMBOL, "bool2"},
		{tok.BOOLEAN, "#f"},
		{tok.RPAREN, ")"},
		{tok.QUOTE, "'"},
		{tok.SYMBOL, "symbol"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "+"},
		{tok.INT, "1"},
		{tok.INT, "2"},
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "<="},
		{tok.INT, "3"},
		{tok.INT, "4"},
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, ">"},
		{tok.INT, "5"},
		{tok.INT, "6"},
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "="},
		{tok.INT, "7"},
		{tok.INT, "7"},
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "?"},
		{tok.SYMBOL, "!"},
		{tok.SYMBOL, "$"},
		{tok.SYMBOL, "%"},
		{tok.SYMBOL, "&"},
		{tok.SYMBOL, "*"},
		{tok.SYMBOL, "/"},
		{tok.SYMBOL, ":"},
		{tok.SYMBOL, "~"},
		{tok.SYMBOL, "_"},
		{tok.SYMBOL, "^"},
		{tok.RPAREN, ")"},
		{tok.EOF, ""},
	}

	for idx, want := range expected {
		got := lexer.NextToken()

		if got.Type != want.expectedType {
			t.Errorf("tests[%d] - tokentype wrong. expected=%q, got=%q",
				idx, want.expectedType, got.Type)
		}

		if got.Lexeme != want.expectedLexeme {
			t.Errorf("tests[%d] - lexeme wrong. expected=%q, got=%q",
				idx, want.expectedLexeme, got.Lexeme)
		}
	}
}

// TestIllegalToken checks that a character the lexer does not support
// is reported as an ILLEGAL token whose lexeme is that character.
func TestIllegalToken(t *testing.T) {
	// '|' serves as the illegal-character test case.
	input := `|`

	lexer := NewLexer(input)
	got := lexer.NextToken()

	if got.Type != tok.ILLEGAL {
		t.Errorf("expected ILLEGAL token type, got %q", got.Type)
	}

	if got.Lexeme != "|" {
		t.Errorf("expected '|' lexeme, got %q", got.Lexeme)
	}
}

// TestStringWithSpecialChars verifies that punctuation characters
// inside a string literal are kept verbatim in the STRING lexeme
// rather than being tokenized individually.
func TestStringWithSpecialChars(t *testing.T) {
	input := `"special chars: !@#$%^&*()"`

	lexer := NewLexer(input)
	got := lexer.NextToken()

	if got.Type != tok.STRING {
		t.Errorf("expected STRING token type, got %q", got.Type)
	}

	if got.Lexeme != "special chars: !@#$%^&*()" {
		t.Errorf("expected 'special chars: !@#$%%^&*()' lexeme, got %q", got.Lexeme)
	}
}

// TestComplexSymbols confirms that every operator and punctuation
// character the lexer treats as symbol-initial — including the
// two-character comparators <= and >= — lexes as a SYMBOL token.
func TestComplexSymbols(t *testing.T) {
	input := `+ - * / = < > <= >= ? ! $ % & * / : ~ _ ^`

	lexer := NewLexer(input)

	expected := []TokenTest{
		{tok.SYMBOL, "+"},
		{tok.SYMBOL, "-"},
		{tok.SYMBOL, "*"},
		{tok.SYMBOL, "/"},
		{tok.SYMBOL, "="},
		{tok.SYMBOL, "<"},
		{tok.SYMBOL, ">"},
		{tok.SYMBOL, "<="},
		{tok.SYMBOL, ">="},
		{tok.SYMBOL, "?"},
		{tok.SYMBOL, "!"},
		{tok.SYMBOL, "$"},
		{tok.SYMBOL, "%"},
		{tok.SYMBOL, "&"},
		{tok.SYMBOL, "*"},
		{tok.SYMBOL, "/"},
		{tok.SYMBOL, ":"},
		{tok.SYMBOL, "~"},
		{tok.SYMBOL, "_"},
		{tok.SYMBOL, "^"},
		{tok.EOF, ""},
	}

	for idx, want := range expected {
		got := lexer.NextToken()

		if got.Type != want.expectedType {
			t.Errorf("tests[%d] - tokentype wrong. expected=%q, got=%q",
				idx, want.expectedType, got.Type)
		}

		if got.Lexeme != want.expectedLexeme {
			t.Errorf("tests[%d] - lexeme wrong. expected=%q, got=%q",
				idx, want.expectedLexeme, got.Lexeme)
		}
	}
}

// TestNumericLeadingSymbols asserts that identifiers starting with
// digits but containing non-numeric characters (e.g. "1st", "123abc")
// are classified as SYMBOL tokens, not numbers.
func TestNumericLeadingSymbols(t *testing.T) {
	input := `1st 2nd 3rd 4th 123abc abc123 123!@#`

	lexer := NewLexer(input)

	expected := []TokenTest{
		{tok.SYMBOL, "1st"},
		{tok.SYMBOL, "2nd"},
		{tok.SYMBOL, "3rd"},
		{tok.SYMBOL, "4th"},
		{tok.SYMBOL, "123abc"},
		{tok.SYMBOL, "abc123"},
		{tok.SYMBOL, "123!@#"},
		{tok.EOF, ""},
	}

	for idx, want := range expected {
		got := lexer.NextToken()

		if got.Type != want.expectedType {
			t.Errorf("tests[%d] - tokentype wrong. expected=%q, got=%q",
				idx, want.expectedType, got.Type)
		}

		if got.Lexeme != want.expectedLexeme {
			t.Errorf("tests[%d] - lexeme wrong. expected=%q, got=%q",
				idx, want.expectedLexeme, got.Lexeme)
		}
	}
}

// 调试函数，打印lexer实际产生的token序列
func TestDebugMultiLineInput(t *testing.T) {
	input := `(define factorial
  (lambda (n)
    (if (<= n 1)
        1
        (* n (factorial (- n 1))))))

(define numbers
  (list 1 2 3 4 5))

(define string-with-newlines "line1\nline2\nline3")

;; This is a multi-line comment
;; testing lexer with comments
(+ 10 20)`

	l := NewLexer(input)

	t.Log("实际产生的token序列：")
	tokenCount := 0
	for {
		token := l.NextToken()
		t.Logf("Token %d: Type=%q, Lexeme=%q", tokenCount, token.Type, token.Lexeme)
		tokenCount++
		if token.Type == tok.EOF {
			break
		}
	}
}

// 多行输入测试函数
func TestMultiLineInput(t *testing.T) {
	input := `(define factorial
  (lambda (n)
    (if (<= n 1)
        1
        (* n (factorial (- n 1))))))

(define numbers
  (list 1 2 3 4 5))

(define string-with-newlines "line1\nline2\nline3")

;; This is a multi-line comment
;; testing lexer with comments
(+ 10 20)`

	l := NewLexer(input)

	// 预期的token序列 - 根据调试输出调整
	tests := []TokenTest{
		{tok.LPAREN, "("},
		{tok.SYMBOL, "define"},
		{tok.SYMBOL, "factorial"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "lambda"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "n"},
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "if"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "<="},
		{tok.SYMBOL, "n"},
		{tok.INT, "1"},
		{tok.RPAREN, ")"},
		{tok.INT, "1"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "*"},
		{tok.SYMBOL, "n"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "factorial"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "-"},
		{tok.SYMBOL, "n"},
		{tok.INT, "1"},
		{tok.RPAREN, ")"},
		{tok.RPAREN, ")"},
		{tok.RPAREN, ")"},
		{tok.RPAREN, ")"},
		{tok.RPAREN, ")"},
		{tok.RPAREN, ")"}, // 额外的右括号
		{tok.LPAREN, "("},
		{tok.SYMBOL, "define"},
		{tok.SYMBOL, "numbers"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "list"},
		{tok.INT, "1"},
		{tok.INT, "2"},
		{tok.INT, "3"},
		{tok.INT, "4"},
		{tok.INT, "5"},
		{tok.RPAREN, ")"},
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "define"},
		{tok.SYMBOL, "string-with-newlines"},
		{tok.STRING, "line1\\nline2\\nline3"}, // 转义的换行符
		{tok.RPAREN, ")"},
		{tok.LPAREN, "("},
		{tok.SYMBOL, "+"},
		{tok.INT, "10"},
		{tok.INT, "20"},
		{tok.RPAREN, ")"},
		{tok.EOF, ""},
	}

	for i, tt := range tests {
		token := l.NextToken()

		if token.Type != tt.expectedType {
			t.Errorf("tests[%d] - tokentype wrong. expected=%q, got=%q",
				i, tt.expectedType, token.Type)
		}

		if token.Lexeme != tt.expectedLexeme {
			t.Errorf("tests[%d] - lexeme wrong. expected=%q, got=%q",
				i, tt.expectedLexeme, token.Lexeme)
		}
	}
}
