package utils

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"encoding/base64"

	"github.com/google/uuid"
	"github.com/mark3labs/mcp-go/mcp"
	"github.com/mark3labs/mcp-go/server"
)

// LogStore configuration constants - based on memory usage analysis.
// The cleanup interval and retention time can be overridden at runtime via
// the LOG_CLEANUP_INTERVAL and LOG_RETENTION_TIME environment variables
// (see getLogCleanupInterval / getLogRetentionTime).
const (
	DefaultMaxLogEntries      = 500              // Reduced from 1000 for safety
	MaxLogEntries             = 2000             // Hard upper limit
	MaxLogEntrySize           = 10 * 1024        // 10KB per entry
	MaxTotalLogMemoryMB       = 50               // 50MB total memory limit
	DefaultQueryLimit         = 50               // Reduced default query limit
	MaxQueryLimit             = 500              // Maximum query limit
	DefaultLogCleanupInterval = 30 * time.Minute // Default cleanup interval
	DefaultLogRetentionTime   = 3 * time.Hour    // Default log retention time
)

// getLogCleanupInterval resolves the log cleanup interval, preferring the
// LOG_CLEANUP_INTERVAL environment variable when it parses as a Go
// duration (e.g. "15m", "1h"); otherwise it falls back to the default.
func getLogCleanupInterval() time.Duration {
	if raw := os.Getenv("LOG_CLEANUP_INTERVAL"); raw != "" {
		if d, err := time.ParseDuration(raw); err == nil {
			return d
		}
	}
	return DefaultLogCleanupInterval
}

// getLogRetentionTime resolves the log retention window, preferring the
// LOG_RETENTION_TIME environment variable when it parses as a Go duration;
// otherwise it falls back to the default.
func getLogRetentionTime() time.Duration {
	if raw := os.Getenv("LOG_RETENTION_TIME"); raw != "" {
		if d, err := time.ParseDuration(raw); err == nil {
			return d
		}
	}
	return DefaultLogRetentionTime
}

// LogEntry represents a single structured log record for an API call or
// tool invocation. Fields tagged omitempty are dropped from the JSON
// encoding when empty.
type LogEntry struct {
	Timestamp  time.Time `json:"timestamp"` // when the event occurred
	Level      string    `json:"level"`     // one of DEBUG/INFO/WARN/ERROR (see sanitizeLevel)
	ToolName   string    `json:"tool_name"` // tool that produced the entry; used as an index key
	Message    string    `json:"message"`
	Category   string    `json:"category"`
	Details    string    `json:"details,omitempty"`     // free-form details; truncated if oversized
	Duration   string    `json:"duration,omitempty"`    // call duration, pre-formatted as a string
	StatusCode int       `json:"status_code,omitempty"` // HTTP status of the upstream call, if any
	URL        string    `json:"url,omitempty"`
	Method     string    `json:"method,omitempty"`
	SessionID  string    `json:"session_id,omitempty"`
}

// LogStore manages in-memory log storage with size limits and memory
// monitoring. All fields are protected by mutex; the two index maps are
// rebuilt whenever entries are removed (see rebuildIndices).
type LogStore struct {
	mutex           sync.RWMutex
	entries         []LogEntry // chronological append order; oldest first
	maxSize         int        // clamped to [1, MaxLogEntries] by NewLogStore
	totalMemoryUsed int64      // Track memory usage (estimated, see estimateEntrySize)
	lastCleanup     time.Time  // Last cleanup time
	// Performance optimization: maintain indices for faster queries
	toolIndex  map[string][]int // Map tool names to entry indices
	levelIndex map[string][]int // Map log levels to entry indices
}

// NewLogStore creates a log store whose capacity is clamped into the valid
// range: DefaultMaxLogEntries when maxSize is non-positive, MaxLogEntries
// as the hard ceiling.
func NewLogStore(maxSize int) *LogStore {
	switch {
	case maxSize <= 0:
		maxSize = DefaultMaxLogEntries
	case maxSize > MaxLogEntries:
		maxSize = MaxLogEntries
	}

	return &LogStore{
		entries:     make([]LogEntry, 0, maxSize),
		maxSize:     maxSize,
		lastCleanup: time.Now(),
		toolIndex:   make(map[string][]int),
		levelIndex:  make(map[string][]int),
	}
}

// AddEntry adds a new log entry to the store with memory management:
// oversized entries are truncated before storage, memory accounting is
// updated, the lookup indices gain the new entry's position, and any
// size/memory/time-based cleanup runs afterwards (which may evict old
// entries and rebuild the indices).
func (ls *LogStore) AddEntry(entry LogEntry) {
	ls.mutex.Lock()
	defer ls.mutex.Unlock()

	// Estimate memory usage for this entry
	entrySize := ls.estimateEntrySize(entry)

	// Check if entry is too large
	if entrySize > MaxLogEntrySize {
		// Truncate large fields, then re-estimate so accounting matches
		// what is actually stored.
		entry = ls.truncateEntry(entry)
		entrySize = ls.estimateEntrySize(entry)
	}

	// Add entry to main storage; its index is the pre-append length.
	entryIndex := len(ls.entries)
	ls.entries = append(ls.entries, entry)
	ls.totalMemoryUsed += entrySize

	// Update indices for faster querying
	ls.updateIndices(entry, entryIndex)

	// Perform cleanup if needed (may evict entries and rebuild indices)
	ls.cleanupIfNeeded()
}

// estimateEntrySize approximates the in-memory footprint of a log entry in
// bytes: a fixed overhead for the struct itself (time.Time, int, string
// headers) plus the byte length of every string field. The same estimate
// is used for both additions and evictions, so the accounting in
// totalMemoryUsed stays internally consistent.
func (ls *LogStore) estimateEntrySize(entry LogEntry) int64 {
	size := int64(200) // Base struct size
	// Fix: Level was previously omitted, undercounting every entry.
	size += int64(len(entry.Level))
	size += int64(len(entry.ToolName))
	size += int64(len(entry.Message))
	size += int64(len(entry.Category))
	size += int64(len(entry.Details))
	size += int64(len(entry.Duration))
	size += int64(len(entry.URL))
	size += int64(len(entry.Method))
	size += int64(len(entry.SessionID))
	return size
}

// updateIndices records the entry's position in the tool-name and level
// lookup tables so later queries can avoid scanning every entry. Entries
// with an empty tool name or level are simply not indexed for that key.
func (ls *LogStore) updateIndices(entry LogEntry, index int) {
	if tool := entry.ToolName; tool != "" {
		ls.toolIndex[tool] = append(ls.toolIndex[tool], index)
	}
	if lvl := entry.Level; lvl != "" {
		ls.levelIndex[lvl] = append(ls.levelIndex[lvl], index)
	}
}

// rebuildIndices discards and reconstructs both lookup tables from the
// current entries slice. Called after cleanup operations, since evicting
// entries shifts every surviving entry's index.
func (ls *LogStore) rebuildIndices() {
	ls.toolIndex = make(map[string][]int)
	ls.levelIndex = make(map[string][]int)

	for i := range ls.entries {
		ls.updateIndices(ls.entries[i], i)
	}
}

// truncateEntry caps the oversized string fields (Details, Message, URL)
// so a single record cannot blow the per-entry budget. The entry is a
// value, so the caller receives a modified copy.
func (ls *LogStore) truncateEntry(entry LogEntry) LogEntry {
	const (
		maxFieldSize = 2048
		marker       = "...[TRUNCATED]"
	)

	if len(entry.Details) > maxFieldSize {
		entry.Details = entry.Details[:maxFieldSize] + marker
	}
	if len(entry.Message) > maxFieldSize {
		entry.Message = entry.Message[:maxFieldSize] + marker
	}
	if len(entry.URL) > maxFieldSize {
		entry.URL = entry.URL[:maxFieldSize] + marker
	}

	return entry
}

// cleanupIfNeeded performs cleanup when limits are exceeded. Three
// strategies run in order — count cap, memory cap, then periodic
// time-based retention — and the indices are rebuilt once at the end if
// any of them evicted entries. Caller must hold ls.mutex.
func (ls *LogStore) cleanupIfNeeded() {
	indicesNeedRebuild := false

	// Size-based cleanup: drop the oldest entries beyond maxSize.
	if len(ls.entries) > ls.maxSize {
		// Remove oldest entries
		removeCount := len(ls.entries) - ls.maxSize
		for i := 0; i < removeCount; i++ {
			ls.totalMemoryUsed -= ls.estimateEntrySize(ls.entries[i])
		}
		ls.entries = ls.entries[removeCount:]
		indicesNeedRebuild = true
	}

	// Memory-based cleanup: evict oldest entries one at a time until the
	// estimated total fits under the budget.
	maxMemoryBytes := int64(MaxTotalLogMemoryMB * 1024 * 1024)
	for ls.totalMemoryUsed > maxMemoryBytes && len(ls.entries) > 0 {
		ls.totalMemoryUsed -= ls.estimateEntrySize(ls.entries[0])
		ls.entries = ls.entries[1:]
		indicesNeedRebuild = true
	}

	// Time-based cleanup (configured interval): runs at most once per
	// cleanup interval, dropping entries older than the retention window.
	if time.Since(ls.lastCleanup) > getLogCleanupInterval() {
		oldLength := len(ls.entries)
		ls.performTimeBasedCleanup()
		if len(ls.entries) != oldLength {
			indicesNeedRebuild = true
		}
		ls.lastCleanup = time.Now()
	}

	// Rebuild indices if entries were removed (stored positions are stale).
	if indicesNeedRebuild {
		ls.rebuildIndices()
	}
}

// performTimeBasedCleanup drops every entry older than the configured
// retention window and recomputes the memory estimate from the survivors.
// Caller must hold ls.mutex; indices are NOT rebuilt here (the caller,
// cleanupIfNeeded, handles that).
func (ls *LogStore) performTimeBasedCleanup() {
	cutoff := time.Now().Add(-getLogRetentionTime())

	kept := make([]LogEntry, 0, len(ls.entries))
	var keptBytes int64
	for _, e := range ls.entries {
		if !e.Timestamp.After(cutoff) {
			continue // older than the retention window
		}
		kept = append(kept, e)
		keptBytes += ls.estimateEntrySize(e)
	}

	ls.entries = kept
	ls.totalMemoryUsed = keptBytes
}

// GetLogs returns logs for a specific tool, optionally filtered by level,
// newest first, capped at limit. Empty toolName/level mean "no filter".
// Uses the tool/level indices instead of a full scan when a filter is set.
// NOTE(review): sanitizeInput strips characters like "&" and "'" from
// toolName, so a stored tool name containing those could never be queried
// exactly — presumably tool names never contain them; confirm.
func (ls *LogStore) GetLogs(toolName string, level string, limit int) []LogEntry {
	ls.mutex.RLock()
	defer ls.mutex.RUnlock()

	// Input validation - sanitize parameters
	toolName = ls.sanitizeInput(toolName)
	level = ls.sanitizeLevel(level)
	limit = ls.validateLimit(limit)

	var candidateIndices []int

	// Use indices for faster filtering
	if toolName != "" && level != "" {
		// Filter by both tool and level - find intersection (both index
		// slices are naturally sorted in append order).
		toolIndices := ls.toolIndex[toolName]
		levelIndices := ls.levelIndex[level]
		candidateIndices = ls.intersectIndices(toolIndices, levelIndices)
	} else if toolName != "" {
		// Filter by tool only
		candidateIndices = ls.toolIndex[toolName]
	} else if level != "" {
		// Filter by level only
		candidateIndices = ls.levelIndex[level]
	} else {
		// No filters - use all entries
		candidateIndices = make([]int, len(ls.entries))
		for i := range ls.entries {
			candidateIndices[i] = i
		}
	}

	// Collect filtered entries; the bounds check guards against an index
	// that outlived a cleanup (indices are rebuilt after evictions, so
	// this is defensive).
	var filtered []LogEntry
	for _, idx := range candidateIndices {
		if idx < len(ls.entries) {
			filtered = append(filtered, ls.entries[idx])
		}
	}

	// Sort by timestamp (newest first)
	sort.Slice(filtered, func(i, j int) bool {
		return filtered[i].Timestamp.After(filtered[j].Timestamp)
	})

	// Apply limit
	if limit > 0 && len(filtered) > limit {
		filtered = filtered[:limit]
	}

	return filtered
}

// sanitizeInput trims, length-caps (100 bytes) and strips control
// characters plus common injection sequences from a query parameter.
// The sequences are removed one at a time, in order, so e.g. "-;-"
// collapses to "" (the ";" removal creates a "--" that the next pass
// removes) — the iteration order is deliberate.
func (ls *LogStore) sanitizeInput(input string) string {
	input = strings.TrimSpace(input)
	if len(input) > 100 { // Reasonable limit for tool names
		input = input[:100]
	}

	forbidden := []string{"\n", "\r", "\t", "\x00", "<", ">", "&", "'", "\"", ";", "--"}
	for _, seq := range forbidden {
		input = strings.ReplaceAll(input, seq, "")
	}
	return input
}

// sanitizeLevel normalizes a log level to upper case and returns it only
// if it is one of the four recognized levels; anything else yields "".
func (ls *LogStore) sanitizeLevel(level string) string {
	switch level = strings.ToUpper(strings.TrimSpace(level)); level {
	case "DEBUG", "INFO", "WARN", "ERROR":
		return level
	}
	return "" // invalid levels act as "no level filter"
}

// validateLimit clamps a query limit into (0, MaxQueryLimit], substituting
// DefaultQueryLimit for zero or negative values.
func (ls *LogStore) validateLimit(limit int) int {
	switch {
	case limit <= 0:
		return DefaultQueryLimit
	case limit > MaxQueryLimit:
		return MaxQueryLimit
	default:
		return limit
	}
}

// intersectIndices merges two ascending index slices, keeping only the
// values present in both (classic two-pointer intersection, O(len(a)+len(b))).
func (ls *LogStore) intersectIndices(a, b []int) []int {
	var common []int

	for i, j := 0, 0; i < len(a) && j < len(b); {
		switch {
		case a[i] == b[j]:
			common = append(common, a[i])
			i++
			j++
		case a[i] < b[j]:
			i++
		default:
			j++
		}
	}
	return common
}

// GetAllLogs returns logs across all tools and levels, newest first,
// capped at limit (limit is clamped by validateLimit inside GetLogs).
func (ls *LogStore) GetAllLogs(limit int) []LogEntry {
	return ls.GetLogs("", "", limit)
}

// globalLogStore is the process-wide log store shared by every SSEServer
// created through NewSSEServer. Uses the safer default capacity.
var globalLogStore = NewLogStore(DefaultMaxLogEntries)
// sseSession holds the per-client state of one SSE connection: the live
// response writer/flusher, the event queue drained by the handler's main
// loop, the notification channel the MCP server pushes into, and the
// per-session authentication components.
type sseSession struct {
	writer              http.ResponseWriter
	flusher             http.Flusher
	done                chan struct{} // closed to signal session teardown
	eventQueue          chan string   // Channel for queuing events
	sessionID           string
	notificationChannel chan mcp.JSONRPCNotification
	initialized         atomic.Bool

	// Authentication-related fields
	authManager     *AuthStateManager
	authConfig      map[string]*AuthConfig // path -> AuthConfig mapping
	authConfigMutex sync.RWMutex           // protects authConfig map
	tokenExtractor  *TokenExtractor
	headerInjector  *AuthHeaderInjector
}

// SSEContextFunc is a function that takes an existing context and the current
// request and returns a potentially modified context based on the request
// content. This can be used to inject context values from headers, for example.
type SSEContextFunc func(ctx context.Context, r *http.Request) context.Context

// SessionID returns the unique identifier assigned to this session.
func (s *sseSession) SessionID() string {
	return s.sessionID
}

// NotificationChannel exposes the send side of the session's JSON-RPC
// notification channel; the MCP server pushes notifications into it.
func (s *sseSession) NotificationChannel() chan<- mcp.JSONRPCNotification {
	return s.notificationChannel
}

// Initialize atomically marks the session as initialized.
func (s *sseSession) Initialize() {
	s.initialized.Store(true)
}

// Initialized reports whether Initialize has been called.
func (s *sseSession) Initialized() bool {
	return s.initialized.Load()
}

// Authentication-related methods

// GetAuthManager returns the authentication state manager for this session.
// May be nil for sessions created without auth components (see the
// pre-created-session path in handleSSEWithSession).
func (s *sseSession) GetAuthManager() *AuthStateManager {
	return s.authManager
}

// SetAuthConfig stores the authentication configuration for a specific
// path, lazily allocating the map the first time it is needed.
func (s *sseSession) SetAuthConfig(path string, config *AuthConfig) {
	s.authConfigMutex.Lock()
	defer s.authConfigMutex.Unlock()

	if s.authConfig == nil {
		s.authConfig = map[string]*AuthConfig{path: config}
		return
	}
	s.authConfig[path] = config
}

// GetAuthConfig returns the authentication configuration registered for
// the given path, and whether one exists.
func (s *sseSession) GetAuthConfig(path string) (*AuthConfig, bool) {
	s.authConfigMutex.RLock()
	defer s.authConfigMutex.RUnlock()

	// Reading a nil map is safe in Go and yields (nil, false), so no
	// explicit nil check is needed.
	config, exists := s.authConfig[path]
	return config, exists
}

// GetAllAuthConfigs returns a snapshot copy of every authentication
// configuration held by this session, so callers cannot mutate the
// session's internal map. An empty (never nil) map is returned when the
// session holds no configurations.
func (s *sseSession) GetAllAuthConfigs() map[string]*AuthConfig {
	s.authConfigMutex.RLock()
	defer s.authConfigMutex.RUnlock()

	snapshot := make(map[string]*AuthConfig, len(s.authConfig))
	for path, cfg := range s.authConfig {
		snapshot[path] = cfg
	}
	return snapshot
}

// GetTokenExtractor returns the token extractor for this session.
// NOTE(review): Go style would name these without the Get prefix, but
// renaming would break existing callers.
func (s *sseSession) GetTokenExtractor() *TokenExtractor {
	return s.tokenExtractor
}

// GetHeaderInjector returns the auth header injector for this session.
func (s *sseSession) GetHeaderInjector() *AuthHeaderInjector {
	return s.headerInjector
}

// ClearAuthConfig removes the authentication configuration for the given
// path, if one exists.
func (s *sseSession) ClearAuthConfig(path string) {
	s.authConfigMutex.Lock()
	defer s.authConfigMutex.Unlock()

	// delete on a nil map is a no-op, so no nil check is required.
	delete(s.authConfig, path)
}

// ClearAllAuthConfigs drops every authentication configuration held by
// this session. A session whose map was never allocated is left as-is.
func (s *sseSession) ClearAllAuthConfigs() {
	s.authConfigMutex.Lock()
	defer s.authConfigMutex.Unlock()

	if s.authConfig == nil {
		return
	}
	s.authConfig = make(map[string]*AuthConfig)
}

// Compile-time assertion that *sseSession satisfies server.ClientSession.
var _ server.ClientSession = (*sseSession)(nil)

// SSEServer implements a Server-Sent Events (SSE) based MCP server.
// It provides real-time communication capabilities over HTTP using the SSE
// protocol. Each connected session is paired with an MCP server instance
// in the servers map (keyed by session ID).
type SSEServer struct {
	// server          *server.MCPServer
	servers         map[string]*server.MCPServer // session ID -> MCP server; guarded by serversMutex
	serversMutex    sync.RWMutex
	baseURL         string
	basePath        string
	messageEndpoint string
	sseEndpoint     string
	logsEndpoint    string // New endpoint for accessing logs
	sessions        sync.Map // session ID -> *sseSession
	srv             *http.Server
	contextFunc     SSEContextFunc
	debugMode       bool      // Flag to enable/disable debug logging
	logPrefix       string    // Prefix for log messages
	logStore        *LogStore // Store for API call logs
	// Rate limiting for logs endpoint
	rateLimiter    map[string]time.Time // Simple IP-based rate limiting
	rateLimitMutex sync.RWMutex
}

// SSEOption defines a function type for configuring SSEServer
// (functional-options pattern; applied in order by NewSSEServer).
type SSEOption func(*SSEServer)

// WithBaseURL sets the base URL for the SSE server. A non-empty value must
// parse, use the http or https scheme, have a real host (not just a port),
// and carry no query string — otherwise the option is silently ignored and
// the current base URL is left untouched. Any trailing "/" is stripped.
func WithBaseURL(baseURL string) SSEOption {
	return func(s *SSEServer) {
		if baseURL != "" {
			parsed, err := url.Parse(baseURL)
			if err != nil {
				return
			}
			if parsed.Scheme != "http" && parsed.Scheme != "https" {
				return
			}
			// Reject an empty host or a bare ":port" host.
			if parsed.Host == "" || strings.HasPrefix(parsed.Host, ":") {
				return
			}
			if len(parsed.Query()) != 0 {
				return
			}
		}
		s.baseURL = strings.TrimSuffix(baseURL, "/")
	}
}

// WithBasePath sets the base path prefix for all endpoints. The value is
// normalized to begin with "/" and not end with one.
func WithBasePath(basePath string) SSEOption {
	return func(s *SSEServer) {
		normalized := basePath
		if !strings.HasPrefix(normalized, "/") {
			normalized = "/" + normalized
		}
		s.basePath = strings.TrimSuffix(normalized, "/")
	}
}

// WithMessageEndpoint sets the message endpoint path (default "/message").
func WithMessageEndpoint(endpoint string) SSEOption {
	return func(s *SSEServer) {
		s.messageEndpoint = endpoint
	}
}

// WithSSEEndpoint sets the SSE endpoint path (default "/sse").
func WithSSEEndpoint(endpoint string) SSEOption {
	return func(s *SSEServer) {
		s.sseEndpoint = endpoint
	}
}

// WithHTTPServer sets the HTTP server instance. Note that Start replaces
// s.srv with its own server, so this is only effective when the caller
// serves the SSEServer through some other means.
func WithHTTPServer(srv *http.Server) SSEOption {
	return func(s *SSEServer) {
		s.srv = srv
	}
}

// WithSSEContextFunc sets a function that will be called to customise the
// context passed to the server using the incoming request.
func WithSSEContextFunc(fn SSEContextFunc) SSEOption {
	return func(s *SSEServer) {
		s.contextFunc = fn
	}
}

// WithDebugMode sets the debug mode for logging (debug mode includes raw
// message/response payloads in log output).
func WithDebugMode(debug bool) SSEOption {
	return func(s *SSEServer) {
		s.debugMode = debug
	}
}

// WithLogPrefix sets a custom prefix prepended to every log message.
func WithLogPrefix(prefix string) SSEOption {
	return func(s *SSEServer) {
		s.logPrefix = prefix
	}
}

// NewSSEServer creates a new SSE server instance configured by the given
// options. Defaults: "/sse", "/message" and "/logs" endpoints, the shared
// global log store, and an empty rate-limiter table.
func NewSSEServer(opts ...SSEOption) *SSEServer {
	srv := &SSEServer{
		servers:         make(map[string]*server.MCPServer),
		sseEndpoint:     "/sse",
		messageEndpoint: "/message",
		logsEndpoint:    "/logs",
		logStore:        globalLogStore,
		rateLimiter:     make(map[string]time.Time),
	}

	// Apply all options in order; later options win on conflicts.
	for _, apply := range opts {
		apply(srv)
	}

	return srv
}

// Start begins serving SSE connections on the specified address, using the
// SSEServer itself as the handler. It blocks until the server stops.
// NOTE(review): no timeouts are configured on the http.Server; a
// WriteTimeout would break long-lived SSE streams, but a ReadHeaderTimeout
// may be worth adding — confirm deployment requirements.
func (s *SSEServer) Start(addr string) error {
	s.srv = &http.Server{
		Addr:    addr,
		Handler: s,
	}

	return s.srv.ListenAndServe()
}

// Shutdown gracefully stops the SSE server: every active session's done
// channel is closed (terminating its event loops), sessions are removed,
// and the HTTP server is shut down with the provided context.
// NOTE(review): handleSSEWithSession also closes session.done when the
// client disconnects; a shutdown racing a disconnect could double-close
// the channel and panic — confirm and guard (e.g. sync.Once) if reachable.
func (s *SSEServer) Shutdown(ctx context.Context) error {
	if s.srv != nil {
		s.sessions.Range(func(key, value interface{}) bool {
			if session, ok := value.(*sseSession); ok {
				close(session.done)
			}
			s.sessions.Delete(key)
			return true
		})

		return s.srv.Shutdown(ctx)
	}
	return nil
}

// checkRateLimit implements simple IP-based rate limiting for the logs
// endpoint: at most one request per second per client IP. On every allowed
// request, entries idle for over a minute are pruned so the table stays
// bounded. Returns false when the caller should be rejected.
func (s *SSEServer) checkRateLimit(clientIP string) bool {
	s.rateLimitMutex.Lock()
	defer s.rateLimitMutex.Unlock()

	now := time.Now()
	if last, seen := s.rateLimiter[clientIP]; seen && now.Sub(last) < time.Second {
		return false
	}

	// Record this request as the IP's most recent.
	s.rateLimiter[clientIP] = now

	// Drop stale entries (older than one minute) to bound memory.
	for ip, ts := range s.rateLimiter {
		if now.Sub(ts) > time.Minute {
			delete(s.rateLimiter, ip)
		}
	}

	return true
}

// logMessage logs a printf-style message, prepending "[prefix] " when a
// log prefix is configured. The prefix is spliced into the format string
// before the argument substitution, matching the original behavior.
func (s *SSEServer) logMessage(format string, v ...interface{}) {
	prefixed := format
	if s.logPrefix != "" {
		prefixed = fmt.Sprintf("[%s] %s", s.logPrefix, format)
	}
	log.Printf(prefixed, v...)
}

// handleSSE handles incoming SSE connection requests.
// It sets up appropriate headers and creates a new session for the client
// by delegating to handleSSEWithSession with no pre-created session.
func (s *SSEServer) handleSSE(mcpServer *server.MCPServer, w http.ResponseWriter, r *http.Request) {
	s.handleSSEWithSession(mcpServer, nil, w, r)
}

// handleSSEWithSession establishes the SSE stream for one client session.
// It either completes a pre-created session (wiring in the live writer,
// flusher, and channels) or builds a fresh one, registers the session with
// the MCP server, starts a goroutine that forwards server notifications
// onto the event queue, announces the message endpoint to the client, and
// then pumps queued events until the connection ends.
func (s *SSEServer) handleSSEWithSession(mcpServer *server.MCPServer, preCreatedSession *sseSession, w http.ResponseWriter, r *http.Request) {
	// SSE streams are established with GET only.
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Standard SSE response headers.
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	w.Header().Set("Access-Control-Allow-Origin", "*")

	// Streaming requires the ResponseWriter to support flushing.
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming unsupported", http.StatusInternalServerError)
		return
	}

	var session *sseSession
	var sessionID string

	if preCreatedSession != nil {
		// Use the pre-created session and complete its initialization.
		// NOTE(review): unlike the branch below, this path does not set up
		// the auth fields (authManager, authConfig, ...) — presumably the
		// code that pre-created the session did; confirm.
		session = preCreatedSession
		sessionID = session.sessionID
		session.writer = w
		session.flusher = flusher
		session.done = make(chan struct{})
		session.eventQueue = make(chan string, 100)
		session.notificationChannel = make(chan mcp.JSONRPCNotification, 100)
		s.logMessage("[CONNECTION] New user connected with pre-created session. Session ID: %s, Remote Address: %s", sessionID, r.RemoteAddr)
	} else {
		// Create a new session (fallback for compatibility)
		sessionID = uuid.New().String()
		s.logMessage("[CONNECTION] New user connected. Session ID: %s, Remote Address: %s", sessionID, r.RemoteAddr)
		session = &sseSession{
			writer:              w,
			flusher:             flusher,
			done:                make(chan struct{}),
			eventQueue:          make(chan string, 100), // Buffer for events
			sessionID:           sessionID,
			notificationChannel: make(chan mcp.JSONRPCNotification, 100),

			// Initialize authentication components
			authManager:     NewAuthStateManager(),
			authConfig:      make(map[string]*AuthConfig),
			tokenExtractor:  NewTokenExtractor(),
			headerInjector:  NewAuthHeaderInjector(),
		}
	}

	// Protect map write with mutex
	s.serversMutex.Lock()
	s.servers[sessionID] = mcpServer
	s.serversMutex.Unlock()

	s.sessions.Store(sessionID, session)
	defer s.sessions.Delete(sessionID)

	// Use read lock when accessing the map for reading
	s.serversMutex.RLock()
	mcpServer = s.servers[sessionID]
	s.serversMutex.RUnlock()

	if err := mcpServer.RegisterSession(session); err != nil {
		s.logMessage("[ERROR] Session registration failed: %v, Session ID: %s", err, sessionID)
		http.Error(w, fmt.Sprintf("Session registration failed: %v", err), http.StatusInternalServerError)
		return
	}
	defer func() {
		s.serversMutex.Lock()
		defer s.serversMutex.Unlock()
		mcpServer.UnregisterSession(sessionID)
		// Redundant with the earlier deferred Delete, but harmless.
		s.sessions.Delete(sessionID)

		// Clean up authentication states for this session
		if session.authManager != nil {
			session.authManager.Clear()
		}

		s.logMessage("[DISCONNECTION] User disconnected. Session ID: %s", sessionID)
	}()

	// Start notification handler for this session: each JSON-RPC
	// notification is marshaled and forwarded onto the event queue.
	go func() {
		for {
			select {
			case notification := <-session.notificationChannel:
				eventData, err := json.Marshal(notification)
				if err == nil {
					if s.debugMode {
						s.logMessage("[NOTIFICATION] Sending notification to session %s: %s", sessionID, string(eventData))
					} else {
						s.logMessage("[NOTIFICATION] Sending notification to session %s", sessionID)
					}
					// Block until the event is queued or the session ends.
					select {
					case session.eventQueue <- fmt.Sprintf("event: message\ndata: %s\n\n", eventData):
						// Event queued successfully
					case <-session.done:
						return
					}
				}
			case <-session.done:
				return
			case <-r.Context().Done():
				return
			}
		}
	}()

	messageEndpoint := fmt.Sprintf("%s?sessionId=%s", s.CompleteMessageEndpoint(), sessionID)
	s.logMessage("[ENDPOINT] Session %s message endpoint: %s", sessionID, messageEndpoint)

	// Send the initial endpoint event so the client learns where to POST.
	// NOTE(review): this event ends with "\r\n\r\n" while every other event
	// uses "\n\n" — looks inconsistent; confirm clients accept both.
	fmt.Fprintf(w, "event: endpoint\ndata: %s\r\n\r\n", messageEndpoint)
	flusher.Flush()

	// Main event loop - this runs in the HTTP handler goroutine, draining
	// the event queue until the client disconnects.
	for {
		select {
		case event := <-session.eventQueue:
			// Write the event to the response
			fmt.Fprint(w, event)
			flusher.Flush()
		case <-r.Context().Done():
			s.logMessage("[DISCONNECTION] Client connection terminated. Session ID: %s", sessionID)
			// NOTE(review): Shutdown may also close session.done; a race
			// could double-close this channel — verify.
			close(session.done)
			return
		}
	}
}

// handleMessage processes incoming JSON-RPC messages from clients and sends
// responses back through both the SSE connection and the HTTP response.
// When a response exists it is queued on the session's SSE event channel
// (best-effort) and echoed in the HTTP body with status 202; notifications
// (no response) get a bare 202.
func (s *SSEServer) handleMessage(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		s.writeJSONRPCError(w, nil, mcp.INVALID_REQUEST, "Method not allowed")
		return
	}

	sessionID := r.URL.Query().Get("sessionId")
	if sessionID == "" {
		s.writeJSONRPCError(w, nil, mcp.INVALID_PARAMS, "Missing sessionId")
		return
	}

	s.logMessage("[MESSAGE] Received message for session ID: %s, Remote Address: %s", sessionID, r.RemoteAddr)

	sessionI, ok := s.sessions.Load(sessionID)
	if !ok {
		s.logMessage("[ERROR] Invalid session ID: %s", sessionID)
		s.writeJSONRPCError(w, nil, mcp.INVALID_PARAMS, "Invalid session ID")
		return
	}
	session := sessionI.(*sseSession)

	// Use read lock when accessing the map for reading.
	// Renamed from "server" to avoid shadowing the imported server package.
	s.serversMutex.RLock()
	mcpServer := s.servers[sessionID]
	s.serversMutex.RUnlock()

	// Guard against a session whose server entry was already removed;
	// previously this would nil-panic below.
	if mcpServer == nil {
		s.logMessage("[ERROR] No server registered for session ID: %s", sessionID)
		s.writeJSONRPCError(w, nil, mcp.INVALID_PARAMS, "Invalid session ID")
		return
	}

	ctx := mcpServer.WithContext(r.Context(), session)
	if s.contextFunc != nil {
		ctx = s.contextFunc(ctx, r)
	}

	// Parse message as raw JSON
	var rawMessage json.RawMessage
	if err := json.NewDecoder(r.Body).Decode(&rawMessage); err != nil {
		s.logMessage("[ERROR] Parse error for session %s: %v", sessionID, err)
		s.writeJSONRPCError(w, nil, mcp.PARSE_ERROR, "Parse error")
		return
	}

	// Best-effort logging of method/params. Fix: a failure to decode the
	// envelope into a map (e.g. a batch array) previously skipped the
	// ENTIRE processing block, silently dropping the message with no
	// response; now it only changes how the request is logged.
	method := ""
	var request map[string]interface{}
	if err := json.Unmarshal(rawMessage, &request); err == nil {
		method, _ = request["method"].(string)
		params, hasParams := request["params"]

		// Log method and parameters, but skip detailed params for list methods
		if method != "tools/list" {
			if hasParams {
				if paramsJSON, merr := json.Marshal(params); merr == nil {
					s.logMessage("[MCP TOOL CALL] Session %s: Method: %s, Params: %s", sessionID, method, string(paramsJSON))
				} else {
					s.logMessage("[MCP TOOL CALL] Session %s: Method: %s, Params: [error marshaling params]", sessionID, method)
				}
			} else {
				s.logMessage("[MCP TOOL CALL] Session %s: Method: %s, Params: none", sessionID, method)
			}
		}
	} else {
		// Fallback logging when the message is valid JSON but not an
		// object (the original placed this branch inside the tools/list
		// else, where it could never see a parse failure).
		if s.debugMode {
			s.logMessage("[DEBUG][TOOL CALL] Session %s received tool call: %s", sessionID, string(rawMessage))
		} else {
			s.logMessage("[TOOL CALL] Session %s received tool call", sessionID)
		}
	}

	// Process message through MCPServer regardless of logging outcome.
	response := mcpServer.HandleMessage(ctx, rawMessage)

	// Log a response summary (raw payloads only in debug mode).
	if response != nil {
		respData, _ := json.Marshal(response)

		respMap := make(map[string]interface{})
		if err := json.Unmarshal(respData, &respMap); err == nil {
			if result, hasResult := respMap["result"]; hasResult && result != nil {
				if method != "tools/list" {
					s.logMessage("[MCP TOOL RESPONSE] Session %s: Method response", sessionID)
				}
			} else if errObj, hasError := respMap["error"]; hasError && errObj != nil {
				s.logMessage("[MCP TOOL RESPONSE] Session %s: Method responded with error", sessionID)
			} else {
				s.logMessage("[MCP TOOL RESPONSE] Session %s: Method responded", sessionID)
			}
		} else {
			// Fallback to old behavior if JSON parsing fails
			if s.debugMode {
				s.logMessage("[DEBUG][TOOL RESPONSE] Session %s tool response: %s", sessionID, string(respData))
			} else {
				s.logMessage("[TOOL RESPONSE] Session %s received response", sessionID)
			}
		}
	}

	// Only send response if there is one (not for notifications)
	if response != nil {
		eventData, _ := json.Marshal(response)

		// Queue the event for sending via SSE (non-blocking; a full queue
		// or closed session is logged and skipped).
		select {
		case session.eventQueue <- fmt.Sprintf("event: message\ndata: %s\n\n", eventData):
			s.logMessage("[EVENT QUEUED] Response queued for session %s", sessionID)
		case <-session.done:
			s.logMessage("[EVENT FAILED] Cannot queue response - session %s is closed", sessionID)
		default:
			s.logMessage("[EVENT FAILED] Cannot queue response - session %s queue is full", sessionID)
		}

		// Send HTTP response
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusAccepted)
		json.NewEncoder(w).Encode(response)
	} else {
		// For notifications, just send 202 Accepted with no body
		s.logMessage("[NOTIFICATION] No response needed for session %s", sessionID)
		w.WriteHeader(http.StatusAccepted)
	}
}

// writeJSONRPCError writes a JSON-RPC error body with the given id, code
// and message, always using HTTP status 400.
func (s *SSEServer) writeJSONRPCError(
	w http.ResponseWriter,
	id interface{},
	code int,
	message string,
) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusBadRequest)
	json.NewEncoder(w).Encode(createErrorResponse(id, code, message))
}

// SendEventToSession marshals event and queues it on the SSE session
// identified by sessionID. It returns an error when the session is
// unknown, closed, or its event queue is full (the send never blocks).
func (s *SSEServer) SendEventToSession(
	sessionID string,
	event interface{},
) error {
	value, found := s.sessions.Load(sessionID)
	if !found {
		return fmt.Errorf("session not found: %s", sessionID)
	}
	sess := value.(*sseSession)

	payload, err := json.Marshal(event)
	if err != nil {
		return err
	}

	// Non-blocking queue attempt.
	select {
	case sess.eventQueue <- fmt.Sprintf("event: message\ndata: %s\n\n", payload):
		return nil
	case <-sess.done:
		return fmt.Errorf("session closed")
	default:
		return fmt.Errorf("event queue full")
	}
}

// GetUrlPath extracts the path component of a URL string, returning an
// error when the input does not parse.
func (s *SSEServer) GetUrlPath(input string) (string, error) {
	parsed, err := url.Parse(input)
	if err != nil {
		return "", fmt.Errorf("failed to parse URL %s: %w", input, err)
	}
	return parsed.Path, nil
}

// CompleteSseEndpoint returns the absolute SSE endpoint URL
// (baseURL + basePath + sseEndpoint).
func (s *SSEServer) CompleteSseEndpoint() string {
	return s.baseURL + s.basePath + s.sseEndpoint
}

// CompleteSsePath returns just the path portion of the SSE endpoint,
// falling back to basePath + sseEndpoint if the full URL fails to parse.
func (s *SSEServer) CompleteSsePath() string {
	path, err := s.GetUrlPath(s.CompleteSseEndpoint())
	if err != nil {
		return s.basePath + s.sseEndpoint
	}
	return path
}

// CompleteMessageEndpoint returns the absolute message endpoint URL
// (baseURL + basePath + messageEndpoint).
func (s *SSEServer) CompleteMessageEndpoint() string {
	return s.baseURL + s.basePath + s.messageEndpoint
}

// CompleteMessagePath returns just the path portion of the message
// endpoint, falling back to basePath + messageEndpoint on parse failure.
func (s *SSEServer) CompleteMessagePath() string {
	path, err := s.GetUrlPath(s.CompleteMessageEndpoint())
	if err != nil {
		return s.basePath + s.messageEndpoint
	}
	return path
}

// CompleteLogsEndpoint returns the absolute logs endpoint URL
// (baseURL + basePath + logsEndpoint).
func (s *SSEServer) CompleteLogsEndpoint() string {
	return s.baseURL + s.basePath + s.logsEndpoint
}

// CompleteLogsPath returns just the path portion of the logs endpoint,
// falling back to basePath + logsEndpoint on parse failure.
func (s *SSEServer) CompleteLogsPath() string {
	path, err := s.GetUrlPath(s.CompleteLogsEndpoint())
	if err != nil {
		return s.basePath + s.logsEndpoint
	}
	return path
}

// RequestParams holds the parameters for creating an MCP server from an OpenAPI spec.
type RequestParams struct {
	SchemaURL string            // URL of the OpenAPI schema
	BaseURL   string            // base URL used for upstream API calls
	Headers   map[string]string // extra headers — presumably sent upstream; confirm with callers
	RawBytes  []byte            // raw schema bytes, when provided instead of a URL
	Filters   []PathFilter      // include/exclude filters applied to API paths
	ToolID    string            // tool ID field
	Error     error             // error captured while assembling the params
}

// PathFilter defines a filter to include or exclude API paths.
type PathFilter struct {
	Pattern string   `json:"pattern"` // Path pattern (supports glob: * for one segment, ** for zero or more segments)
	Methods []string `json:"methods"` // HTTP methods to filter (GET, POST, etc.); empty means all methods
	Exclude bool     `json:"exclude"` // If true, this is an exclusion filter
}

// MatchesPath reports whether path matches this filter's glob pattern
// (see matchGlob for the pattern semantics).
func (f PathFilter) MatchesPath(path string) bool {
	return matchGlob(f.Pattern, path)
}

// MatchesMethod reports whether the HTTP method matches this filter.
// An empty Methods list matches every method; comparison is
// case-insensitive.
func (f PathFilter) MatchesMethod(method string) bool {
	if len(f.Methods) == 0 {
		return true // no methods specified means match all methods
	}

	for i := range f.Methods {
		if strings.EqualFold(f.Methods[i], method) {
			return true
		}
	}
	return false
}

// matchGlob implements simple glob matching over slash-separated paths:
// "*" matches exactly one segment, "**" matches zero or more segments.
// A bare "*" or "**" pattern matches everything.
func matchGlob(pattern, path string) bool {
	if pattern == "*" || pattern == "**" {
		return true
	}

	// Compare segment-by-segment after stripping surrounding slashes.
	patternSegments := strings.Split(strings.Trim(pattern, "/"), "/")
	pathSegments := strings.Split(strings.Trim(path, "/"), "/")
	return matchGlobSegments(patternSegments, pathSegments)
}

// matchGlobSegments recursively matches pattern segments against path
// segments: "**" may consume zero or more segments, "*" consumes exactly
// one, and any other segment must match literally.
func matchGlobSegments(pattern, path []string) bool {
	// An exhausted pattern matches only an exhausted path.
	if len(pattern) == 0 {
		return len(path) == 0
	}

	head, rest := pattern[0], pattern[1:]

	if head == "**" {
		// Either "**" matches nothing at this position, or it absorbs one
		// path segment and we retry with the same pattern.
		if matchGlobSegments(rest, path) {
			return true
		}
		return len(path) > 0 && matchGlobSegments(pattern, path[1:])
	}

	// A concrete or "*" segment needs a path segment to consume.
	if len(path) == 0 {
		return false
	}
	if head != "*" && head != path[0] {
		return false
	}
	return matchGlobSegments(rest, path[1:])
}

// FilterExpression represents a single filter expression in the DSL.
type FilterExpression struct {
	Include bool     // true for "+" (include), false for "-" (exclude)
	Pattern string   // glob path pattern (see matchGlob)
	Methods []string // upper-cased HTTP methods; empty means all methods
}

// ParseFilterDSL parses a filter DSL string into a FilterDSL object
//
// The DSL syntax is as follows:
// - "+" at start means include (default), "-" means exclude
// - Path pattern follows (supports glob: * for segment, ** for multiple segments)
// - Optional ":METHOD1 METHOD2" suffix to specify HTTP methods (space-separated)
// - Multiple expressions separated by semicolons
//
// Examples:
// - "+/api/**" - Include all endpoints under /api/
// - "-/api/admin/**" - Exclude all endpoints under /api/admin/
// - "+/users/*:GET" - Include GET endpoints for /users/{id}
// - "+/**:GET;-/internal/**" - Include all GET endpoints except those under /internal/
func ParseFilterDSL(dsl string) FilterDSL {
	result := FilterDSL{}
	expressions := strings.Split(dsl, ";")

	for _, expr := range expressions {
		expr = strings.TrimSpace(expr)
		if expr == "" {
			continue
		}

		expression := FilterExpression{
			Include: true,
		}

		// Check for include/exclude prefix
		if strings.HasPrefix(expr, "+") {
			expression.Include = true
			expr = expr[1:]
		} else if strings.HasPrefix(expr, "-") {
			expression.Include = false
			expr = expr[1:]
		}

		// Check for method suffix
		parts := strings.Split(expr, ":")
		expression.Pattern = parts[0]

		if len(parts) > 1 && parts[1] != "" {
			// Methods are space-separated
			methods := strings.Fields(parts[1])
			for _, method := range methods {
				method = strings.TrimSpace(method)
				if method != "" {
					expression.Methods = append(expression.Methods, strings.ToUpper(method))
				}
			}
		}

		result.Expressions = append(result.Expressions, expression)
	}

	return result
}

// FilterDSL provides a domain-specific language for filtering API paths
type FilterDSL struct {
	Expressions []FilterExpression
}

// ToPathFilters converts the parsed DSL expressions into PathFilter values
// suitable for ApplyFilters / ShouldIncludePath. Include expressions become
// non-exclude filters and vice versa.
func (dsl FilterDSL) ToPathFilters() []PathFilter {
	filters := make([]PathFilter, 0, len(dsl.Expressions))

	for _, expr := range dsl.Expressions {
		filters = append(filters, PathFilter{
			Pattern: expr.Pattern,
			Methods: expr.Methods,
			Exclude: !expr.Include,
		})
	}

	return filters
}

// ApplyFilters returns the subset of apis that pass the given path/method
// filters. With no filters the input slice is returned unchanged.
func ApplyFilters(apis []APIEndpoint, filters []PathFilter) []APIEndpoint {
	// Nothing to do when no filters are configured.
	if len(filters) == 0 {
		return apis
	}

	var kept []APIEndpoint
	for _, endpoint := range apis {
		if !ShouldIncludePath(endpoint.Path, endpoint.Method, filters) {
			continue
		}
		kept = append(kept, endpoint)
	}

	return kept
}

// parseRequestParams extracts connection parameters from the request URL
// query and, when a schema location is supplied, loads the raw schema bytes.
//
// Two query styles are supported:
//   - "code": a single base64-encoded JSON object bundling all parameters
//   - individual query parameters ("s", "u", "t", "h", "f")
//
// Any failure is reported through params.Error rather than a separate
// return value, so callers have a single result to inspect.
func (s *SSEServer) parseRequestParams(r *http.Request) RequestParams {
	query := r.URL.Query()
	params := RequestParams{
		Headers: make(map[string]string),
	}

	if encoded := query.Get("code"); encoded != "" {
		decodeBundledParams(encoded, &params)
	} else {
		readQueryParams(query, &params)
	}
	if params.Error != nil {
		return params
	}

	// Load schema content if provided. getSchemaURL already handles both
	// remote http(s) URLs and local file paths, so no branching is needed
	// here (previously this duplicated that branch and double-prefixed the
	// fetch error message).
	if params.SchemaURL != "" {
		raw, err := getSchemaURL(params.SchemaURL)
		if err != nil {
			params.Error = fmt.Errorf("failed to load schema: %w", err)
			return params
		}
		params.RawBytes = raw
	}

	return params
}

// decodeBundledParams decodes a base64 "code" query parameter containing a
// JSON object with short keys: s=schema URL, u=base URL, t=tool ID,
// h=headers object, f=filter DSL string. Errors are reported via
// params.Error.
func decodeBundledParams(encoded string, params *RequestParams) {
	decodedBytes, err := Base64Decode(encoded)
	if err != nil {
		params.Error = fmt.Errorf("failed to decode parameters: %w", err)
		return
	}

	var decoded map[string]interface{}
	if err := json.Unmarshal(decodedBytes, &decoded); err != nil {
		params.Error = fmt.Errorf("failed to parse decoded parameters: %w", err)
		return
	}

	if schema, ok := decoded["s"].(string); ok {
		params.SchemaURL = schema
	}
	if baseURL, ok := decoded["u"].(string); ok {
		params.BaseURL = baseURL
	}
	if toolID, ok := decoded["t"].(string); ok {
		params.ToolID = toolID
	}

	// Headers arrive as a JSON object; non-string values are ignored.
	if headers, ok := decoded["h"].(map[string]interface{}); ok {
		for key, value := range headers {
			if strValue, ok := value.(string); ok {
				params.Headers[key] = strValue
			}
		}
	}

	// Filter DSL string, e.g. "+/api/**;-/internal/**".
	if filterDSL, ok := decoded["f"].(string); ok {
		params.Filters = append(params.Filters, ParseFilterDSL(filterDSL).ToPathFilters()...)
	}
}

// readQueryParams reads the traditional individual query parameters.
// Errors are reported via params.Error.
func readQueryParams(query url.Values, params *RequestParams) {
	params.SchemaURL = query.Get("s")
	params.BaseURL = query.Get("u")
	params.ToolID = query.Get("t")

	// "h" is a JSON object mapping header names to values.
	if h := query.Get("h"); h != "" {
		if err := json.Unmarshal([]byte(h), &params.Headers); err != nil {
			params.Error = fmt.Errorf("failed to parse headers: %w", err)
			return
		}
	}

	// "f" may be repeated; each occurrence is a filter DSL string.
	for _, filterDSL := range query["f"] {
		if filterDSL != "" {
			params.Filters = append(params.Filters, ParseFilterDSL(filterDSL).ToPathFilters()...)
		}
	}
}

// Base64Decode decodes a standard-alphabet base64 string into raw bytes.
func Base64Decode(encoded string) ([]byte, error) {
	decoded, err := base64.StdEncoding.DecodeString(encoded)
	return decoded, err
}

// ServeHTTP implements the http.Handler interface. It routes requests by
// exact path match to one of three endpoints: the SSE connection endpoint
// (which parses parameters, builds an MCP server from the OpenAPI schema,
// and starts the session), the message endpoint, and the logs endpoint.
// Anything else is a 404.
func (s *SSEServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	path := r.URL.Path
	// Use exact path matching rather than Contains
	ssePath := s.CompleteSsePath()
	if ssePath != "" && path == ssePath {
		s.logMessage("[REQUEST] SSE connection request from %s", r.RemoteAddr)

		// Parse request parameters (query or base64 "code" bundle); this
		// also fetches the raw schema bytes when a schema URL is supplied.
		params := s.parseRequestParams(r)
		if params.Error != nil {
			s.logMessage("[ERROR] Failed to parse request parameters: %v", params.Error)
			http.Error(w, fmt.Sprintf("Failed to parse request parameters: %v", params.Error), http.StatusInternalServerError)
			return
		}

		var mcpServer *server.MCPServer

		// First, try with our custom parser
		var parser OpenAPIParser
		var parseErr error

		// Update log prefix based on schema info if not already set
		if s.logPrefix == "" && params.BaseURL != "" {
			// Use the baseURL from the schema as the log prefix if not already set
			baseURLHost, _ := getHostFromURL(params.BaseURL)
			if baseURLHost != "" {
				s.logPrefix = baseURLHost
			}
		}

		// Check if it looks like YAML or JSON and pick the matching parser.
		if isYAML(params.RawBytes) {
			s.logMessage("[PARSER] Parsing YAML OpenAPI schema, size: %d bytes", len(params.RawBytes))
			parser, parseErr = ParseOpenAPIFromYAML(params.RawBytes)
		} else {
			s.logMessage("[PARSER] Parsing JSON OpenAPI schema, size: %d bytes", len(params.RawBytes))
			parser, parseErr = ParseOpenAPIFromJSON(params.RawBytes)
		}
		if parseErr != nil {
			s.logMessage("[ERROR] Failed to parse OpenAPI schema: %v", parseErr)
			http.Error(w, fmt.Sprintf("Failed to parse OpenAPI schema: %v", parseErr), http.StatusInternalServerError)
			return
		}

		// Apply filters if present by wrapping the parser; the wrapper
		// filters endpoints lazily in APIs().
		if len(params.Filters) > 0 {
			s.logMessage("[FILTERS] Applying %d filters to API endpoints", len(params.Filters))
			// Create a filtered parser that wraps the original parser
			parser = &FilteredOpenAPIParser{
				BaseParser: parser,
				Filters:    params.Filters,
			}
		}

		// Create a temporary session for dynamic authentication support
		// This session will be used during MCP server creation and then passed to handleSSE
		sessionID := uuid.New().String()
		tempSession := &sseSession{
			sessionID:       sessionID,
			authManager:     NewAuthStateManager(),
			authConfig:      make(map[string]*AuthConfig),
			tokenExtractor:  NewTokenExtractor(),
			headerInjector:  NewAuthHeaderInjector(),
		}

		var err error
		s.logMessage("[SERVER] Creating MCP server with dynamic authentication support, base URL: %s", params.BaseURL)
		mcpServer, err = NewMCPFromCustomParserWithDynamicAuth(params.BaseURL, params.Headers, parser, params.ToolID, tempSession)
		if err != nil {
			s.logMessage("[ERROR] Failed to create MCP server: %v", err)
			http.Error(w, fmt.Sprintf("Failed to create MCP server: %v", err), http.StatusInternalServerError)
			return
		}

		// Log the available API endpoints
		apis := parser.APIs()
		s.logMessage("[SERVER] MCP server created with %d API endpoints", len(apis))

		// Only log detailed endpoints in debug mode
		if s.debugMode {
			for i, api := range apis {
				if i < 10 { // Limit logging to first 10 endpoints to avoid flooding logs
					s.logMessage("[DEBUG][ENDPOINT] %s %s", api.Method, api.Path)
				} else if i == 10 {
					s.logMessage("[DEBUG][ENDPOINT] ... and %d more endpoints", len(apis)-10)
					break
				}
			}
		}

		// Hand the connection over to the SSE loop; does not return until
		// the stream is done.
		s.handleSSEWithSession(mcpServer, tempSession, w, r)
		return
	}
	messagePath := s.CompleteMessagePath()
	if messagePath != "" && path == messagePath {
		s.logMessage("[REQUEST] Message request from %s to %s", r.RemoteAddr, path)
		s.handleMessage(w, r)
		return
	}

	// Handle logs endpoint
	logsPath := s.CompleteLogsPath()
	if logsPath != "" && path == logsPath {
		s.logMessage("[REQUEST] Logs request from %s to %s", r.RemoteAddr, path)
		s.handleLogs(w, r)
		return
	}

	s.logMessage("[NOT FOUND] Path not found: %s", path)
	http.NotFound(w, r)
}

// FilteredOpenAPIParser wraps an OpenAPIParser and restricts the endpoints
// it reports according to a set of PathFilters. All other parser behavior
// is delegated unchanged to the wrapped parser.
type FilteredOpenAPIParser struct {
	BaseParser OpenAPIParser
	Filters    []PathFilter
}

// Compile-time check that FilteredOpenAPIParser implements OpenAPIParser.
var _ OpenAPIParser = (*FilteredOpenAPIParser)(nil)

// Servers delegates to the base parser.
func (f *FilteredOpenAPIParser) Servers() []Server {
	return f.BaseParser.Servers()
}

// Info delegates to the base parser.
func (f *FilteredOpenAPIParser) Info() APIInfo {
	return f.BaseParser.Info()
}

// APIs returns the base parser's endpoints with the configured path/method
// filters applied (filtering happens on every call, not at construction).
func (f *FilteredOpenAPIParser) APIs() []APIEndpoint {
	allAPIs := f.BaseParser.APIs()
	return ApplyFilters(allAPIs, f.Filters)
}

// GetTLSSkipVerify delegates to the base parser.
func (f *FilteredOpenAPIParser) GetTLSSkipVerify() bool {
	return f.BaseParser.GetTLSSkipVerify()
}

// isYAML heuristically decides whether a schema document is YAML rather
// than JSON so the right parser can be chosen.
func isYAML(data []byte) bool {
	s := string(data)
	head := strings.TrimSpace(s)
	// JSON documents start with '{' or '['; never treat them as YAML. The
	// previous heuristic (contains ':' and a newline) misclassified every
	// pretty-printed JSON document as YAML.
	if strings.HasPrefix(head, "{") || strings.HasPrefix(head, "[") {
		return false
	}
	// Simple heuristic: check for common YAML indicators
	return strings.Contains(s, "---") ||
		strings.Contains(s, ":") && strings.Contains(s, "\n") ||
		strings.HasPrefix(head, "openapi:") ||
		strings.HasPrefix(head, "swagger:")
}

// createResponse builds a successful JSON-RPC response message carrying the
// given request id and result payload.
func createResponse(id interface{}, result interface{}) mcp.JSONRPCMessage {
	return mcp.JSONRPCResponse{
		JSONRPC: mcp.JSONRPC_VERSION,
		ID:      id,
		Result:  result,
	}
}

// createErrorResponse builds a JSON-RPC error message for the given request
// id with the supplied error code and message; the optional Data field is
// left unset.
func createErrorResponse(
	id interface{},
	code int,
	message string,
) mcp.JSONRPCMessage {
	return mcp.JSONRPCError{
		JSONRPC: mcp.JSONRPC_VERSION,
		ID:      id,
		// The anonymous struct mirrors the declared type of the Error
		// field on mcp.JSONRPCError.
		Error: struct {
			Code    int         `json:"code"`
			Message string      `json:"message"`
			Data    interface{} `json:"data,omitempty"`
		}{
			Code:    code,
			Message: message,
		},
	}
}

func getSchemaURL(schemaURL string) ([]byte, error) {
	if strings.HasPrefix(schemaURL, "http://") || strings.HasPrefix(schemaURL, "https://") {
		resp, httpErr := http.Get(schemaURL)
		if httpErr != nil {
			return nil, fmt.Errorf("failed to fetch schema from URL: %v", httpErr)
		}
		defer resp.Body.Close()
		return io.ReadAll(resp.Body)
	}
	return os.ReadFile(schemaURL)
}

// ShouldIncludePath determines whether a path/method pair passes the given
// filters. Exclusion always wins over inclusion. When at least one include
// filter exists, an endpoint must match one of them to be kept; when only
// exclude filters exist, everything not excluded is kept. With no filters
// at all, everything is included.
func ShouldIncludePath(path string, method string, filters []PathFilter) bool {
	if len(filters) == 0 {
		return true
	}

	matches := func(f PathFilter) bool {
		return f.MatchesPath(path) && f.MatchesMethod(method)
	}

	// Exclusion overrides everything, so check it first.
	for _, f := range filters {
		if f.Exclude && matches(f) {
			return false
		}
	}

	// Not excluded: keep the endpoint if an include filter matches it, or
	// if there are no include filters at all (exclude-only filter sets
	// include by default).
	hasIncludeFilters := false
	for _, f := range filters {
		if f.Exclude {
			continue
		}
		hasIncludeFilters = true
		if matches(f) {
			return true
		}
	}

	return !hasIncludeFilters
}

// getHostFromURL extracts the hostname (without port) from a URL string.
func getHostFromURL(urlStr string) (string, error) {
	parsed, parseErr := url.Parse(urlStr)
	if parseErr != nil {
		return "", parseErr
	}
	// Hostname() strips any port and surrounding brackets (IPv6).
	return parsed.Hostname(), nil
}

// allowedOrigins defines the whitelist of allowed origins for CORS
var allowedOrigins = []string{
	"http://localhost:3000", // Production frontend
	"http://localhost:5173", // Vite dev server
	"http://localhost:5174", // Alternative dev port
	"http://127.0.0.1:3000",
	"http://127.0.0.1:5173",
	"http://127.0.0.1:5174",
}

// isAllowedOrigin checks if the origin is in the allowed list
func isAllowedOrigin(origin string) bool {
	for _, allowed := range allowedOrigins {
		if origin == allowed {
			return true
		}
	}
	return false
}

// setCORSHeaders sets secure CORS headers
func setCORSHeaders(w http.ResponseWriter, r *http.Request) {
	origin := r.Header.Get("Origin")
	if isAllowedOrigin(origin) {
		w.Header().Set("Access-Control-Allow-Origin", origin)
	} else {
		// For development, allow localhost variations
		if strings.HasPrefix(origin, "http://localhost:") || strings.HasPrefix(origin, "http://127.0.0.1:") {
			w.Header().Set("Access-Control-Allow-Origin", origin)
		}
	}
	w.Header().Set("Access-Control-Allow-Methods", "GET, OPTIONS")
	w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
	w.Header().Set("Access-Control-Max-Age", "86400") // 24 hours
}

// handleLogs serves GET requests on the logs endpoint: it validates the
// tool/level/limit query parameters, queries the log store with a 5-second
// timeout, and writes the matching entries as a JSON envelope. OPTIONS
// preflight requests are answered immediately; all responses carry CORS
// headers. Requests are rate-limited per remote address.
func (s *SSEServer) handleLogs(w http.ResponseWriter, r *http.Request) {
	// Set secure CORS headers
	setCORSHeaders(w, r)

	// CORS preflight: headers are already set, nothing else to do.
	if r.Method == "OPTIONS" {
		w.WriteHeader(http.StatusOK)
		return
	}

	if r.Method != "GET" {
		s.logMessage("[LOGS_API] Invalid method %s from %s", r.Method, r.RemoteAddr)
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Rate limiting check (simple IP-based)
	if !s.checkRateLimit(r.RemoteAddr) {
		s.logMessage("[LOGS_API] Rate limit exceeded for %s", r.RemoteAddr)
		http.Error(w, "Rate limit exceeded", http.StatusTooManyRequests)
		return
	}

	// Parse and validate query parameters
	query := r.URL.Query()
	toolName := query.Get("tool")
	level := query.Get("level")
	limitStr := query.Get("limit")

	// Enhanced input validation
	if len(toolName) > 100 {
		s.logMessage("[LOGS_API] Tool name too long from %s", r.RemoteAddr)
		http.Error(w, "Tool name too long", http.StatusBadRequest)
		return
	}

	// Validate and parse limit with better error handling
	limit := DefaultQueryLimit
	if limitStr != "" {
		if parsedLimit, err := strconv.Atoi(limitStr); err != nil {
			s.logMessage("[LOGS_API] Invalid limit parameter from %s: %s", r.RemoteAddr, limitStr)
			http.Error(w, "Invalid limit parameter", http.StatusBadRequest)
			return
		} else {
			limit = parsedLimit
		}
	}

	// Validate limit bounds: negatives are rejected, oversized limits are
	// silently capped rather than rejected.
	if limit < 0 {
		s.logMessage("[LOGS_API] Negative limit from %s: %d", r.RemoteAddr, limit)
		http.Error(w, "Limit must be non-negative", http.StatusBadRequest)
		return
	}
	if limit > MaxQueryLimit {
		limit = MaxQueryLimit // Cap at maximum instead of error
	}

	// Validate log level if provided (case-insensitive; note the original
	// casing of `level` is what gets passed to the store below).
	if level != "" {
		validLevels := map[string]bool{"DEBUG": true, "INFO": true, "WARN": true, "ERROR": true}
		if !validLevels[strings.ToUpper(level)] {
			s.logMessage("[LOGS_API] Invalid log level from %s: %s", r.RemoteAddr, level)
			http.Error(w, "Invalid log level. Valid levels: DEBUG, INFO, WARN, ERROR", http.StatusBadRequest)
			return
		}
	}

	// Log the request for audit purposes
	s.logMessage("[LOGS_API] Request from %s - tool: %s, level: %s, limit: %d", r.RemoteAddr, toolName, level, limit)

	// Get logs from store with error handling
	var logs []LogEntry
	var err error

	// Use a timeout context to prevent hanging
	ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
	defer cancel()

	// Execute query in a goroutine with timeout. Writes to `logs`/`err`
	// are safely visible to the reader because close(done) happens-before
	// the receive on `done` below. A panic inside GetLogs is converted
	// into an error instead of crashing the server.
	done := make(chan struct{})
	go func() {
		defer func() {
			if recover() != nil {
				err = fmt.Errorf("internal error during log query")
			}
			close(done)
		}()
		logs = s.logStore.GetLogs(toolName, level, limit)
	}()

	select {
	case <-done:
		if err != nil {
			s.logMessage("[LOGS_API] Error querying logs: %v", err)
			http.Error(w, "Internal server error", http.StatusInternalServerError)
			return
		}
	case <-ctx.Done():
		// NOTE(review): on timeout the query goroutine keeps running in
		// the background until GetLogs returns; its result is discarded.
		s.logMessage("[LOGS_API] Query timeout for %s", r.RemoteAddr)
		http.Error(w, "Query timeout", http.StatusRequestTimeout)
		return
	}

	// Ensure logs is never nil for JSON response
	if logs == nil {
		logs = []LogEntry{} // Return empty array instead of null
	}

	// Return JSON response with enhanced metadata
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
	w.Header().Set("X-Content-Type-Options", "nosniff")

	response := map[string]interface{}{
		"success":   true,
		"logs":      logs,
		"total":     len(logs),
		"timestamp": time.Now().Unix(),
		"filters": map[string]interface{}{
			"tool":  toolName,
			"level": level,
			"limit": limit,
		},
	}

	if err := json.NewEncoder(w).Encode(response); err != nil {
		// Encoding can fail mid-stream; the error response here may be
		// ineffective if headers were already flushed, but we log it.
		s.logMessage("[LOGS_API] Failed to encode response: %v", err)
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		return
	}

	s.logMessage("[LOGS_API] Successfully returned %d logs to %s", len(logs), r.RemoteAddr)
}
