package memory

import (
	"context"
	"fmt"
	"log"
	"os"
	"sync"
	"time"

	"github.com/google/uuid"
	"github.com/louloulin/dataflare/pkg/cluster/clusterapi"
)

// Manager is a memory-based cluster manager for testing. It implements
// clusterapi.Manager without any networking: the local node is the only
// member and is always the leader. Manager must not be copied after
// creation because it embeds a sync.RWMutex.
type Manager struct {
	// config is the cluster configuration supplied to NewManager.
	config *clusterapi.Config
	// localNode is information about the local node; the same pointer
	// is also stored in the nodes map under its ID.
	localNode *clusterapi.NodeInfo
	// nodes is a map of node IDs to node information. In memory mode it
	// only ever contains the local node.
	nodes map[string]*clusterapi.NodeInfo
	// metadata is a map of keys to metadata values set via SetMetadata.
	metadata map[string]string
	// eventChannels maps event types (or "*" for all events) to the
	// subscriber channels created by Subscribe.
	eventChannels map[string][]chan clusterapi.Event
	// isLeader indicates if this node is the leader (always true in
	// memory mode).
	isLeader bool
	// leaderID is the ID of the current leader (the local node's ID).
	leaderID string
	// mu guards nodes, metadata, eventChannels, isLeader and leaderID.
	mu sync.RWMutex
	// logger is the logger for the cluster manager.
	logger *log.Logger
	// ctx is the manager's lifetime context; cancelled by Stop.
	ctx context.Context
	// cancel is the cancel function for ctx.
	cancel context.CancelFunc
}

// NewManager creates a new memory-based cluster manager.
//
// The returned manager is intended for testing: it never performs any
// network I/O and always considers itself the elected leader. If
// config.NodeID is empty, a random UUID is generated and written back
// into config so the caller can observe the assigned ID.
//
// It returns an error if config is nil or fails validation.
func NewManager(config *clusterapi.Config) (clusterapi.Manager, error) {
	// Guard against a nil config: Validate and the field accesses
	// below would otherwise panic.
	if config == nil {
		return nil, fmt.Errorf("cluster configuration must not be nil")
	}

	// Validate configuration before doing any work.
	if err := config.Validate(); err != nil {
		return nil, fmt.Errorf("invalid cluster configuration: %w", err)
	}

	// Generate a node ID if the caller did not provide one.
	if config.NodeID == "" {
		config.NodeID = uuid.New().String()
	}

	// Create the manager's lifetime context; Stop cancels it.
	ctx, cancel := context.WithCancel(context.Background())

	// Use a single timestamp so StartTime and LastSeen agree exactly.
	now := time.Now()
	localNode := &clusterapi.NodeInfo{
		ID:        config.NodeID,
		Address:   config.AdvertiseAddress,
		Role:      config.Role,
		Tags:      config.Tags,
		Metadata:  config.Metadata,
		Status:    "ready",
		StartTime: now,
		LastSeen:  now,
	}

	manager := &Manager{
		config:        config,
		localNode:     localNode,
		nodes:         map[string]*clusterapi.NodeInfo{config.NodeID: localNode},
		metadata:      make(map[string]string),
		eventChannels: make(map[string][]chan clusterapi.Event),
		isLeader:      true, // Memory manager is always the leader
		leaderID:      config.NodeID,
		logger:        log.New(os.Stderr, "[MemoryManager] ", log.LstdFlags),
		ctx:           ctx,
		cancel:        cancel,
	}

	return manager, nil
}

// Start starts the cluster manager.
//
// In memory mode no network activity occurs: the method simply
// announces the local node as joined and as the elected leader to any
// subscribers that registered before Start was called.
func (m *Manager) Start(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.logger.Printf("Starting memory-based cluster manager for node %s", m.config.NodeID)

	// Announce that the local node has joined the cluster.
	joinEvent := clusterapi.Event{
		Type:      clusterapi.EventTypeNodeJoin,
		NodeID:    m.config.NodeID,
		Node:      m.localNode,
		Timestamp: time.Now(),
	}
	m.broadcastEvent(joinEvent)

	// Announce the local node as the leader; there was no previous
	// leader, hence the empty old_leader_id.
	leaderEvent := clusterapi.Event{
		Type:      clusterapi.EventTypeLeaderChanged,
		NodeID:    m.config.NodeID,
		Node:      m.localNode,
		Timestamp: time.Now(),
		Data: map[string]interface{}{
			"old_leader_id": "",
		},
	}
	m.broadcastEvent(leaderEvent)

	return nil
}

// Stop stops the cluster manager.
//
// It broadcasts a node-leave event, cancels the manager's context and
// closes every subscriber channel. Subscribers observe the shutdown as
// a closed channel.
func (m *Manager) Stop(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.logger.Printf("Stopping memory-based cluster manager for node %s", m.config.NodeID)

	// Let subscribers know the local node is leaving before channels
	// are torn down.
	leaveEvent := clusterapi.Event{
		Type:      clusterapi.EventTypeNodeLeave,
		NodeID:    m.config.NodeID,
		Node:      m.localNode,
		Timestamp: time.Now(),
	}
	m.broadcastEvent(leaveEvent)

	// Cancel the manager's lifetime context.
	m.cancel()

	// Close every subscriber channel and discard all registrations.
	for _, subscribers := range m.eventChannels {
		for _, subscriber := range subscribers {
			close(subscriber)
		}
	}
	m.eventChannels = make(map[string][]chan clusterapi.Event)

	return nil
}

// Join joins the cluster using the provided seed nodes.
//
// Memory mode has no remote peers, so this only logs the request.
func (m *Manager) Join(ctx context.Context, addresses []string) error {
	m.mu.Lock()
	m.logger.Printf("Joining cluster with seed nodes: %v", addresses)
	m.mu.Unlock()
	return nil
}

// Leave leaves the cluster.
//
// Memory mode has no remote peers, so this only logs the request.
func (m *Manager) Leave(ctx context.Context) error {
	m.mu.Lock()
	m.logger.Printf("Leaving cluster")
	m.mu.Unlock()
	return nil
}

// GetLocalNode returns information about the local node.
func (m *Manager) GetLocalNode(ctx context.Context) (*clusterapi.NodeInfo, error) {
	m.mu.RLock()
	node := m.localNode
	m.mu.RUnlock()
	return node, nil
}

// GetNode returns information about the node with the given ID, or an
// error if no such node is known.
func (m *Manager) GetNode(ctx context.Context, nodeID string) (*clusterapi.NodeInfo, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()

	if node, ok := m.nodes[nodeID]; ok {
		return node, nil
	}
	return nil, fmt.Errorf("node not found: %s", nodeID)
}

// GetNodes returns information about all known nodes. The order of the
// returned slice is unspecified (map iteration order).
func (m *Manager) GetNodes(ctx context.Context) ([]*clusterapi.NodeInfo, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()

	result := make([]*clusterapi.NodeInfo, 0, len(m.nodes))
	for _, info := range m.nodes {
		result = append(result, info)
	}
	return result, nil
}

// GetLeader returns information about the current leader node. It
// fails if no leader has been elected or the leader is not in the
// nodes map.
func (m *Manager) GetLeader(ctx context.Context) (*clusterapi.NodeInfo, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()

	leaderID := m.leaderID
	if leaderID == "" {
		return nil, fmt.Errorf("no leader elected")
	}

	if node, ok := m.nodes[leaderID]; ok {
		return node, nil
	}
	return nil, fmt.Errorf("leader node not found: %s", leaderID)
}

// IsLeader reports whether the local node is the leader.
func (m *Manager) IsLeader(ctx context.Context) (bool, error) {
	m.mu.RLock()
	leader := m.isLeader
	m.mu.RUnlock()
	return leader, nil
}

// GetMetadata returns the metadata value stored under key, or an error
// if the key is absent.
func (m *Manager) GetMetadata(ctx context.Context, key string) (string, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()

	if value, ok := m.metadata[key]; ok {
		return value, nil
	}
	return "", fmt.Errorf("metadata not found: %s", key)
}

// SetMetadata stores value under key and broadcasts a metadata-changed
// event carrying the new key/value pair.
func (m *Manager) SetMetadata(ctx context.Context, key string, value string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.metadata[key] = value

	// Notify subscribers that this metadata entry changed.
	event := clusterapi.Event{
		Type:      clusterapi.EventTypeMetadataChanged,
		NodeID:    m.config.NodeID,
		Node:      m.localNode,
		Timestamp: time.Now(),
		Data: map[string]interface{}{
			"key":   key,
			"value": value,
		},
	}
	m.broadcastEvent(event)

	return nil
}

// DeleteMetadata removes the metadata entry for key and broadcasts a
// metadata-changed event with an empty value. The event is sent even
// when the key was not present (delete on a missing key is a no-op).
func (m *Manager) DeleteMetadata(ctx context.Context, key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	delete(m.metadata, key)

	// Notify subscribers that this metadata entry was removed; an
	// empty value signals deletion.
	event := clusterapi.Event{
		Type:      clusterapi.EventTypeMetadataChanged,
		NodeID:    m.config.NodeID,
		Node:      m.localNode,
		Timestamp: time.Now(),
		Data: map[string]interface{}{
			"key":   key,
			"value": "",
		},
	}
	m.broadcastEvent(event)

	return nil
}

// Subscribe registers a new subscriber for events of the given type.
// Use "*" as the event type to receive all events. The returned
// channel is buffered; broadcastEvent drops events for subscribers
// whose buffer is full, and Stop closes the channel.
func (m *Manager) Subscribe(ctx context.Context, eventType string) (<-chan clusterapi.Event, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Buffer so a slow consumer does not block the broadcaster (which
	// holds m.mu while sending).
	ch := make(chan clusterapi.Event, 100)

	// Appending to the nil slice returned for a missing map key
	// allocates automatically, so no explicit initialization is needed.
	m.eventChannels[eventType] = append(m.eventChannels[eventType], ch)

	return ch, nil
}

// Unsubscribe removes a previously subscribed channel for the given
// event type and closes it. It returns an error if the event type has
// no subscribers at all; an unknown channel for a known event type is
// silently ignored (matching previous behavior).
func (m *Manager) Unsubscribe(ctx context.Context, eventType string, ch <-chan clusterapi.Event) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	channels, ok := m.eventChannels[eventType]
	if !ok {
		return fmt.Errorf("event type not found: %s", eventType)
	}

	for i, c := range channels {
		if c == ch {
			remaining := append(channels[:i], channels[i+1:]...)
			if len(remaining) == 0 {
				// Drop the key entirely so the map does not accumulate
				// empty slices for event types with no subscribers.
				delete(m.eventChannels, eventType)
			} else {
				m.eventChannels[eventType] = remaining
			}
			close(c)
			break
		}
	}

	return nil
}

// broadcastEvent delivers an event to all subscribers of its type and
// then to wildcard ("*") subscribers. Sends are non-blocking: if a
// subscriber's buffer is full the event is dropped for that subscriber,
// so the caller (which holds m.mu) never stalls. Callers must hold
// m.mu.
func (m *Manager) broadcastEvent(event clusterapi.Event) {
	for _, key := range []string{event.Type, "*"} {
		for _, subscriber := range m.eventChannels[key] {
			select {
			case subscriber <- event:
			default:
				// Subscriber buffer is full; drop the event.
			}
		}
	}
}

// GetCluster returns the underlying cluster implementation. Memory
// mode has no backing cluster, so it always returns nil; callers must
// handle a nil result.
func (m *Manager) GetCluster() interface{} {
	return nil
}
