package distributed

import (
	"context"
	"fmt"
	"sync"
	"time"

	protov1 "xagent/proto/v1"

	"github.com/asynkron/protoactor-go/actor"
	"github.com/asynkron/protoactor-go/cluster"
	"github.com/asynkron/protoactor-go/cluster/clusterproviders/consul"
	"github.com/asynkron/protoactor-go/remote"
	"github.com/hashicorp/consul/api"
	"github.com/sirupsen/logrus"
)

// ClusterManager manages the distributed cluster: it tracks peer nodes,
// registers this node as a service in Consul, and bridges messages
// through the protoactor remote/cluster layers.
type ClusterManager struct {
	system     *actor.ActorSystem       // local actor system used to spawn actors and send messages
	node       *protov1.Node            // this node's own identity (id, address, role, status)
	nodes      map[string]*protov1.Node // known peers keyed by node ID; guarded by nodesMutex
	nodesMutex sync.RWMutex             // protects nodes
	pid        *actor.PID               // NOTE(review): never assigned anywhere in this file — confirm it is set elsewhere or remove
	consul     *api.Client              // Consul agent client used for service (de)registration
	logger     *logrus.Entry            // component-scoped logger ("component"="cluster")
	remote     *remote.Remote           // protoactor remoting endpoint (created in NewClusterManager)
	provider   cluster.ClusterProvider  // Consul-backed membership provider (set in initCluster)
	cluster    *cluster.Cluster         // protoactor cluster instance (set in initCluster)
	name       string                   // node name from Config
	address    string                   // listen address from Config
	port       int                      // listen port from Config
}

// NodeInfo contains additional node information used for liveness
// bookkeeping.
type NodeInfo struct {
	LastSeen int64              // last-seen timestamp in Unix nanoseconds (compare against time.Now().UnixNano())
	Status   protov1.NodeStatus // last observed status of the node
}

// NodeIdentityLookup implements cluster.IdentityLookup by mapping actor
// kinds to locally spawned PIDs.
type NodeIdentityLookup struct {
	manager *ClusterManager       // back-reference used to spawn/stop actors and resolve this node's address
	pids    map[string]*actor.PID // PIDs keyed by actor kind; guarded by mutex
	mutex   sync.RWMutex          // protects pids
}

// NewNodeIdentityLookup returns an identity lookup bound to the given
// cluster manager, with an empty kind-to-PID table.
func NewNodeIdentityLookup(manager *ClusterManager) *NodeIdentityLookup {
	lookup := &NodeIdentityLookup{manager: manager}
	lookup.pids = map[string]*actor.PID{}
	return lookup
}

// Get resolves a cluster identity to a PID. Kinds registered via Setup
// are served from the local pid table; anything else falls back to a
// fresh PID addressed at this node.
//
// NOTE(review): the table is keyed by identity.Kind only, so distinct
// identities of the same kind all resolve to one PID — confirm this
// single-actor-per-kind model is intended. The fallback PID likewise
// uses identity.Kind (not the full identity) as the actor id.
func (n *NodeIdentityLookup) Get(identity *cluster.ClusterIdentity) *actor.PID {
	n.mutex.RLock()
	defer n.mutex.RUnlock()

	if pid, ok := n.pids[identity.Kind]; ok {
		return pid
	}

	return actor.NewPID(n.manager.node.Address, identity.Kind)
}

// RemovePid drops the cached PID for identity.Kind, but only when the
// cached entry is the same PID the caller is removing — a different
// (newer) registration under the same kind is left untouched.
func (n *NodeIdentityLookup) RemovePid(identity *cluster.ClusterIdentity, pid *actor.PID) {
	n.mutex.Lock()
	defer n.mutex.Unlock()

	current, ok := n.pids[identity.Kind]
	if ok && current.Equal(pid) {
		delete(n.pids, identity.Kind)
	}
}

// Setup spawns one ClusterActor per registered kind and records its PID
// so Get can resolve local identities. Clients register no kinds.
func (n *NodeIdentityLookup) Setup(cluster *cluster.Cluster, kinds []string, isClient bool) {
	if isClient {
		return
	}

	// The producer does not depend on the kind, so build the Props once
	// instead of once per iteration.
	props := actor.PropsFromProducer(func() actor.Actor {
		return &ClusterActor{manager: n.manager}
	})

	for _, kind := range kinds {
		pid, err := n.manager.system.Root.SpawnNamed(props, kind)
		if err != nil {
			// Previously a spawn failure was silently dropped, leaving the
			// kind unregistered with no trace. Surface it so operators can
			// see why lookups for this kind fall back to a synthetic PID.
			n.manager.logger.WithError(err).Errorf("failed to spawn actor for kind %q", kind)
			continue
		}
		n.mutex.Lock()
		n.pids[kind] = pid
		n.mutex.Unlock()
	}
}

// Shutdown stops every actor registered in the lookup and leaves the
// pid table empty.
func (n *NodeIdentityLookup) Shutdown() {
	n.mutex.Lock()
	defer n.mutex.Unlock()

	for kind, pid := range n.pids {
		n.manager.system.Root.Stop(pid)
		delete(n.pids, kind)
	}
}

// NewClusterManager constructs a ClusterManager wired to Consul and a
// protoactor remote endpoint, using the supplied actor system and
// configuration. The cluster itself is not started here.
func NewClusterManager(system *actor.ActorSystem, config *Config) (*ClusterManager, error) {
	// Point the Consul client at the configured agent address.
	config.ConsulConfig.Address = config.ConsulAddr
	consulClient, err := api.NewClient(config.ConsulConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create consul client: %v", err)
	}

	cm := &ClusterManager{
		system:  system,
		node:    config.Node,
		nodes:   map[string]*protov1.Node{},
		consul:  consulClient,
		logger:  logrus.WithField("component", "cluster"),
		name:    config.NodeName,
		address: config.Address,
		port:    config.Port,
	}

	// Remoting listens on the configured address/port.
	cm.remote = remote.NewRemote(system, remote.Configure(config.Address, config.Port))
	return cm, nil
}

// initCluster initializes the cluster configuration: a Consul membership
// provider, a local identity lookup, a protoactor cluster instance, and
// a root-level "cluster" actor for this node.
func (cm *ClusterManager) initCluster() error {
	// Create remote configuration.
	// NOTE(review): the port is hard-coded to 8090 while NewClusterManager
	// configures a remote on config.Port — confirm which endpoint the
	// cluster layer is meant to use, or lift 8090 into Config.
	remoteConfig := remote.Configure(cm.node.Address, 8090)

	// Create consul provider
	provider, err := consul.New()
	if err != nil {
		return fmt.Errorf("failed to create consul provider: %v", err)
	}
	cm.provider = provider

	// Create identity lookup
	identityLookup := NewNodeIdentityLookup(cm)

	// Create cluster configuration.
	// NOTE(review): the first argument to cluster.Configure is the cluster
	// name; passing cm.node.Role means nodes with different roles join
	// different clusters — verify this is intentional.
	clusterConfig := cluster.Configure(
		cm.node.Role,
		provider,
		identityLookup,
		remoteConfig,
	)

	// Create cluster instance
	cm.cluster = cluster.New(cm.system, clusterConfig)

	// Register a root-level actor named "cluster" that receives node and
	// distributed-* messages for this manager.
	_, err = cm.system.Root.SpawnNamed(
		actor.PropsFromProducer(func() actor.Actor {
			return &ClusterActor{manager: cm}
		}),
		"cluster",
	)
	if err != nil {
		return fmt.Errorf("failed to spawn cluster actor: %v", err)
	}

	return nil
}

// Start starts the cluster manager: it brings up remoting, joins the
// cluster as a member, and launches the background node-status loop.
//
// NOTE(review): cm.cluster is only assigned by initCluster, which Start
// does not call — confirm callers run initCluster first, otherwise
// StartMember is invoked on a nil cluster. Also verify whether
// StartMember already starts the remote internally in the
// protoactor-go version in use, which would make cm.remote.Start()
// redundant here.
func (cm *ClusterManager) Start(ctx context.Context) error {
	// Start remote
	cm.remote.Start()

	// Start cluster
	cm.cluster.StartMember()

	// Start heartbeat; the goroutine exits when ctx is canceled.
	go cm.startHeartbeat(ctx)

	return nil
}

// Stop stops the cluster manager: it deregisters this node's service
// from Consul and shuts down the remoting endpoint.
//
// The remote is shut down even when deregistration fails — previously a
// transient Consul error returned early and leaked the remote's
// listener. The deregistration error (if any) is still reported.
func (cm *ClusterManager) Stop() error {
	// Deregister from Consul first so peers stop routing to this node.
	deregErr := cm.consul.Agent().ServiceDeregister(cm.node.Id)

	// Stop remote unconditionally (graceful=true).
	cm.remote.Shutdown(true)

	if deregErr != nil {
		return fmt.Errorf("failed to deregister service: %v", deregErr)
	}
	return nil
}

// GetNodes returns a snapshot slice of all nodes currently known to the
// manager. The slice is freshly allocated; the *Node pointers are shared.
func (cm *ClusterManager) GetNodes() []*protov1.Node {
	cm.nodesMutex.RLock()
	defer cm.nodesMutex.RUnlock()

	snapshot := make([]*protov1.Node, 0, len(cm.nodes))
	for _, n := range cm.nodes {
		snapshot = append(snapshot, n)
	}
	return snapshot
}

// GetNode returns the node with the given ID, or nil if it is unknown.
func (cm *ClusterManager) GetNode(nodeID string) *protov1.Node {
	cm.nodesMutex.RLock()
	node := cm.nodes[nodeID]
	cm.nodesMutex.RUnlock()
	return node
}

// UpdateNodeStatus sets the status of the node identified by nodeID.
// It returns an error when no such node is known.
func (cm *ClusterManager) UpdateNodeStatus(nodeID string, status protov1.NodeStatus) error {
	cm.nodesMutex.Lock()
	defer cm.nodesMutex.Unlock()

	n, ok := cm.nodes[nodeID]
	if !ok {
		return fmt.Errorf("node not found: %s", nodeID)
	}

	n.Status = status
	return nil
}

// BroadcastMessage sends msg to the "cluster" actor of every node whose
// status is ACTIVE. Nodes for which no PID can be resolved are skipped
// with a warning — previously a nil PID from cluster.Get was passed
// straight to Send.
//
// NOTE(review): cm.cluster must have been initialized (initCluster)
// before calling this — confirm the lifecycle ordering at call sites.
func (cm *ClusterManager) BroadcastMessage(msg interface{}) error {
	for _, node := range cm.GetNodes() {
		if node.Status != protov1.NodeStatus_NODE_STATUS_ACTIVE {
			continue
		}
		pid := cm.cluster.Get(node.Id, "cluster")
		if pid == nil {
			cm.logger.Warnf("no PID resolved for node %s; skipping broadcast", node.Id)
			continue
		}
		cm.system.Root.Send(pid, msg)
	}
	return nil
}

// SendMessage delivers msg to the node identified by nodeID. It fails
// when the node is unknown or not in the ACTIVE state.
func (cm *ClusterManager) SendMessage(nodeID string, msg interface{}) error {
	cm.nodesMutex.RLock()
	target, ok := cm.nodes[nodeID]
	cm.nodesMutex.RUnlock()

	// A missing key yields a nil *Node, so both checks collapse here.
	if !ok || target == nil {
		return fmt.Errorf("node not found: %s", nodeID)
	}
	if target.Status != protov1.NodeStatus_NODE_STATUS_ACTIVE {
		return fmt.Errorf("node %s is not active", nodeID)
	}

	cm.system.Root.Send(actor.NewPID(target.Address, nodeID), msg)
	return nil
}

// startHeartbeat runs the node-status refresh loop every 5 seconds until
// ctx is canceled. It blocks; the caller launches it in its own
// goroutine (Start does `go cm.startHeartbeat(ctx)`).
//
// The previous implementation wrapped the loop in an additional inner
// goroutine, so each call returned immediately and spawned a redundant
// goroutine on top of the one the caller already created.
func (cm *ClusterManager) startHeartbeat(ctx context.Context) {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			cm.updateNodeStatus()
		}
	}
}

// updateNodeStatus scans all known nodes, marking those not seen for 30
// seconds as INACTIVE and evicting those not seen for 60 seconds. The
// comparisons multiply by int64(time.Second) (nanoseconds) to match the
// UnixNano timestamps in NodeInfo.LastSeen.
//
// NOTE(review): getNodeInfo always fabricates LastSeen = time.Now(), so
// now-info.LastSeen is effectively zero and neither branch can ever
// fire — this loop is a no-op until real last-seen metadata is stored
// per node (e.g. on message receipt in handleNodeMessage).
func (cm *ClusterManager) updateNodeStatus() {
	cm.nodesMutex.Lock()
	defer cm.nodesMutex.Unlock()

	now := time.Now().UnixNano()
	for id, node := range cm.nodes {
		info := cm.getNodeInfo(node)
		if info == nil {
			continue
		}

		// Deleting from a map during range is well-defined in Go.
		if now-info.LastSeen > 30*int64(time.Second) {
			node.Status = protov1.NodeStatus_NODE_STATUS_INACTIVE
			if now-info.LastSeen > 60*int64(time.Second) {
				delete(cm.nodes, id)
			}
		}
	}
}

// getNodeInfo builds a NodeInfo snapshot for the given node.
//
// NOTE(review): despite the comment, no metadata is read — LastSeen is
// always the current time because per-node timestamps are never stored
// anywhere. Callers using this for staleness detection
// (updateNodeStatus) therefore see a node that is never stale. A real
// implementation needs to record LastSeen when a node message arrives.
func (cm *ClusterManager) getNodeInfo(node *protov1.Node) *NodeInfo {
	info := &NodeInfo{
		LastSeen: time.Now().UnixNano(),
		Status:   node.Status,
	}
	return info
}

// handleNodeMessage registers or refreshes a peer node announcement by
// storing the node record keyed by its ID.
//
// NOTE(review): info is a throwaway snapshot — assigning info.LastSeen
// has no effect because the NodeInfo is never stored; only the *Node
// itself is kept. The last-seen timestamp should be persisted somewhere
// updateNodeStatus can read it.
func (a *ClusterActor) handleNodeMessage(node *protov1.Node) {
	a.manager.nodesMutex.Lock()
	defer a.manager.nodesMutex.Unlock()

	// Update node information with current timestamp
	info := a.manager.getNodeInfo(node)
	info.LastSeen = time.Now().UnixNano()
	a.manager.nodes[node.Id] = node
}

// generateNodeID returns an identifier of the form "node-<nanos>",
// derived from the current wall-clock time in Unix nanoseconds. Two
// calls within the clock's resolution may collide.
func generateNodeID() string {
	timestamp := time.Now().UnixNano()
	return fmt.Sprintf("node-%d", timestamp)
}

// ClusterActor handles cluster messages (node announcements and the
// distributed-* message family) on behalf of its ClusterManager.
type ClusterActor struct {
	manager *ClusterManager // owning manager whose node table this actor updates
	// NOTE(review): embedding the actor.Actor interface leaves a nil
	// interface field; Receive is implemented directly on this type, so
	// the embed appears unnecessary — confirm before removing.
	actor.Actor
}

// Receive dispatches incoming messages by concrete type: node
// announcements update the membership table; the distributed-* family is
// routed to its dedicated handler. Unknown message types are ignored.
func (a *ClusterActor) Receive(ctx actor.Context) {
	message := ctx.Message()
	switch m := message.(type) {
	case *protov1.Node:
		a.handleNodeMessage(m)
	case *protov1.DistributedDecision:
		a.handleDistributedDecision(m)
	case *protov1.DistributedLearning:
		a.handleDistributedLearning(m)
	case *protov1.DistributedPrediction:
		a.handleDistributedPrediction(m)
	case *protov1.DistributedAdaptation:
		a.handleDistributedAdaptation(m)
	}
}

// handleDistributedDecision processes a cluster-wide decision message.
// TODO: not yet implemented — currently a no-op.
func (ca *ClusterActor) handleDistributedDecision(decision *protov1.DistributedDecision) {
	// Handle distributed decision making
}

// handleDistributedLearning processes a distributed learning update.
// TODO: not yet implemented — currently a no-op.
func (ca *ClusterActor) handleDistributedLearning(learning *protov1.DistributedLearning) {
	// Handle distributed learning
}

// handleDistributedPrediction processes a distributed prediction request.
// TODO: not yet implemented — currently a no-op.
func (ca *ClusterActor) handleDistributedPrediction(prediction *protov1.DistributedPrediction) {
	// Handle distributed prediction
}

// handleDistributedAdaptation processes a distributed adaptation message.
// TODO: not yet implemented — currently a no-op.
func (ca *ClusterActor) handleDistributedAdaptation(adaptation *protov1.DistributedAdaptation) {
	// Handle distributed adaptation
}

// NewNode builds a node record with the given identity, address, and
// role, initialized to the ACTIVE status. The receiver is not consulted.
func (cm *ClusterManager) NewNode(id string, address string, role string) *protov1.Node {
	node := &protov1.Node{}
	node.Id = id
	node.Address = address
	node.Role = role
	node.Status = protov1.NodeStatus_NODE_STATUS_ACTIVE
	return node
}

// Configure registers this node as an "xagent" service in Consul (tagged
// with its role), exposes the manager as a remotely-spawnable actor
// kind, and starts remoting.
//
// NOTE(review): the Props producer returns the shared cm instance, so
// every remotely spawned "xagent" actor is the same object — confirm
// this single-instance model is intended. Also, Start() calls
// cm.remote.Start() as well; using both Configure and Start would start
// the remote twice — verify the intended lifecycle.
func (cm *ClusterManager) Configure() error {
	// Register with Consul
	registration := &api.AgentServiceRegistration{
		ID:      cm.node.Id,
		Name:    "xagent",
		Address: cm.node.Address,
		Tags:    []string{cm.node.Role},
	}

	if err := cm.consul.Agent().ServiceRegister(registration); err != nil {
		return fmt.Errorf("failed to register service: %v", err)
	}

	// Expose the manager itself under the "xagent" kind for remote spawns.
	cm.remote.Register("xagent", actor.PropsFromProducer(func() actor.Actor {
		return cm
	}))

	// Start remote
	cm.remote.Start()

	return nil
}

// StartHeartbeat launches a background goroutine that broadcasts a
// heartbeat to all peers every 5 seconds until ctx is canceled. It
// returns immediately.
func (cm *ClusterManager) StartHeartbeat(ctx context.Context) {
	go func() {
		t := time.NewTicker(5 * time.Second)
		defer t.Stop()

		for {
			select {
			case <-t.C:
				cm.broadcastHeartbeat()
			case <-ctx.Done():
				return
			}
		}
	}()
}

// broadcastHeartbeat sends a STATUS heartbeat message to every known
// peer, skipping this node itself. PIDs are addressed directly at each
// peer's address using the peer's node ID as the actor id.
func (cm *ClusterManager) broadcastHeartbeat() {
	cm.nodesMutex.RLock()
	defer cm.nodesMutex.RUnlock()

	for _, peer := range cm.nodes {
		if peer.Id == cm.node.Id {
			continue
		}
		hb := &protov1.AgentMessage{
			Id:     fmt.Sprintf("heartbeat_%d", time.Now().UnixNano()),
			Type:   protov1.MessageType_MESSAGE_TYPE_STATUS,
			Sender: cm.node.Id,
			Content: &protov1.AgentMessage_Message{
				Message: "heartbeat",
			},
		}
		cm.system.Root.Send(actor.NewPID(peer.Address, peer.Id), hb)
	}
}

// Receive handles incoming messages for the manager acting as an actor.
// Only *protov1.AgentMessage is processed; everything else is ignored.
func (cm *ClusterManager) Receive(ctx actor.Context) {
	if msg, ok := ctx.Message().(*protov1.AgentMessage); ok {
		cm.handleAgentMessage(msg)
	}
}

// handleAgentMessage processes an incoming agent message. STATUS
// messages act as heartbeats: the sender, if known, is marked ACTIVE.
// Other message types are ignored.
func (cm *ClusterManager) handleAgentMessage(msg *protov1.AgentMessage) {
	if msg.Type != protov1.MessageType_MESSAGE_TYPE_STATUS {
		return
	}

	cm.nodesMutex.Lock()
	defer cm.nodesMutex.Unlock()
	if sender, ok := cm.nodes[msg.Sender]; ok {
		sender.Status = protov1.NodeStatus_NODE_STATUS_ACTIVE
	}
}
