package distributed

import (
	"context"
	"fmt"
	"sync"
	"time"

	protov1 "xagent/proto/v1"

	"github.com/asynkron/protoactor-go/actor"
)

// PredictionManager handles distributed predictions
// PredictionManager handles distributed predictions. It fans a prediction
// request out to active cluster nodes, merges their contributed results,
// and enforces a per-prediction timeout. Safe for concurrent use: all access
// to the predictions map is guarded by predMutex.
type PredictionManager struct {
	system         *actor.ActorSystem                        // actor system used by the cluster (not referenced directly in this file)
	clusterManager *ClusterManager                           // used to enumerate nodes and broadcast prediction messages
	predictions    map[string]*protov1.DistributedPrediction // in-flight and completed predictions, keyed by prediction ID
	predMutex      sync.RWMutex                              // guards predictions (and mutation of stored entries)
	timeout        time.Duration                             // per-prediction deadline enforced by handlePredictionTimeout
}

// NewPredictionManager creates a new prediction manager
func NewPredictionManager(system *actor.ActorSystem, clusterManager *ClusterManager) *PredictionManager {
	return &PredictionManager{
		system:         system,
		clusterManager: clusterManager,
		predictions:    make(map[string]*protov1.DistributedPrediction),
		timeout:        time.Second * 30,
	}
}

// RequestPrediction initiates a distributed prediction request
func (pm *PredictionManager) RequestPrediction(ctx context.Context, modelID string, features map[string]float32) (*protov1.DistributedPrediction, error) {
	// Create prediction request
	prediction := &protov1.DistributedPrediction{
		Id:                modelID,
		ModelId:           fmt.Sprintf("model-%d", time.Now().UnixNano()),
		Predictions:       make(map[string]float32),
		ContributingNodes: make([]string, 0),
		Status:            protov1.PredictionStatus_PREDICTION_STATUS_PENDING,
		Confidence:        0.0,
		Timestamp:         time.Now().UnixNano(),
	}

	// Get active nodes for prediction
	nodes := pm.clusterManager.GetNodes()
	contributors := make([]string, 0, len(nodes))
	for _, node := range nodes {
		if node.Status == protov1.NodeStatus_NODE_STATUS_ACTIVE {
			contributors = append(contributors, node.Id)
		}
	}
	prediction.ContributingNodes = contributors

	// Store prediction
	pm.predMutex.Lock()
	pm.predictions[prediction.Id] = prediction
	pm.predMutex.Unlock()

	// Start prediction process
	prediction.Status = protov1.PredictionStatus_PREDICTION_STATUS_PROCESSING
	if err := pm.clusterManager.BroadcastMessage(prediction); err != nil {
		return nil, fmt.Errorf("failed to broadcast prediction request: %v", err)
	}

	// Start timeout handler
	go pm.handlePredictionTimeout(ctx, prediction.Id)

	return prediction, nil
}

// ContributePrediction adds a node's prediction results
func (pm *PredictionManager) ContributePrediction(predictionID string, nodeID string, results map[string]float32, confidence float32) error {
	pm.predMutex.Lock()
	defer pm.predMutex.Unlock()

	prediction, exists := pm.predictions[predictionID]
	if !exists {
		return fmt.Errorf("prediction %s not found", predictionID)
	}

	if prediction.Status != protov1.PredictionStatus_PREDICTION_STATUS_PROCESSING {
		return fmt.Errorf("prediction %s is not in processing state", predictionID)
	}

	// Merge predictions with weights based on confidence
	for key, value := range results {
		if existing, ok := prediction.Predictions[key]; ok {
			// Weighted average based on confidence
			totalConf := prediction.Confidence + confidence
			prediction.Predictions[key] = (existing*prediction.Confidence + value*confidence) / totalConf
		} else {
			prediction.Predictions[key] = value
		}
	}

	// Update overall confidence
	prediction.Confidence = (prediction.Confidence + confidence) / 2.0

	// Check if all nodes have contributed
	if len(prediction.Predictions) > 0 && prediction.Confidence >= 0.8 {
		prediction.Status = protov1.PredictionStatus_PREDICTION_STATUS_READY
		// Broadcast final predictions
		pm.clusterManager.BroadcastMessage(prediction)
	}

	return nil
}

// GetPrediction returns a prediction by ID
func (pm *PredictionManager) GetPrediction(predictionID string) *protov1.DistributedPrediction {
	pm.predMutex.RLock()
	defer pm.predMutex.RUnlock()
	return pm.predictions[predictionID]
}

// Internal methods

func (pm *PredictionManager) handlePredictionTimeout(ctx context.Context, predictionID string) {
	timer := time.NewTimer(pm.timeout)
	defer timer.Stop()

	select {
	case <-ctx.Done():
		return
	case <-timer.C:
		pm.predMutex.Lock()
		defer pm.predMutex.Unlock()

		if prediction, exists := pm.predictions[predictionID]; exists {
			if prediction.Status == protov1.PredictionStatus_PREDICTION_STATUS_PROCESSING {
				if len(prediction.Predictions) > 0 {
					// Use partial results if available
					prediction.Status = protov1.PredictionStatus_PREDICTION_STATUS_READY
					pm.clusterManager.BroadcastMessage(prediction)
				} else {
					prediction.Status = protov1.PredictionStatus_PREDICTION_STATUS_FAILED
				}
			}
		}
	}
}

// PredictionActor handles prediction-related messages
type PredictionActor struct {
	manager *PredictionManager
	actor.Actor
	models map[string][]byte // Cached models for prediction
}

func (pa *PredictionActor) Receive(context actor.Context) {
	switch msg := context.Message().(type) {
	case *protov1.DistributedPrediction:
		pa.handlePrediction(msg)
	}
}

func (pa *PredictionActor) handlePrediction(prediction *protov1.DistributedPrediction) {
	switch prediction.Status {
	case protov1.PredictionStatus_PREDICTION_STATUS_PENDING:
		pa.processPrediction(prediction)
	case protov1.PredictionStatus_PREDICTION_STATUS_READY:
		pa.handleFinalPrediction(prediction)
	}
}

func (pa *PredictionActor) processPrediction(prediction *protov1.DistributedPrediction) {
	// Get model for prediction
	model := pa.getModel(prediction.ModelId)
	if model == nil {
		fmt.Printf("No model available for prediction: %s\n", prediction.ModelId)
		return
	}

	// Make prediction
	results, confidence := pa.makePrediction(model)

	// Submit results
	if err := pa.manager.ContributePrediction(prediction.Id, pa.manager.clusterManager.node.Id, results, confidence); err != nil {
		fmt.Printf("Failed to contribute prediction: %v\n", err)
	}
}

func (pa *PredictionActor) handleFinalPrediction(prediction *protov1.DistributedPrediction) {
	// Handle final prediction results
	fmt.Printf("Final prediction results for %s: %v (confidence: %f)\n",
		prediction.Id, prediction.Predictions, prediction.Confidence)
}

// getModel looks up a cached model by ID. A nil return means no model is
// cached locally (placeholder for a real retrieval mechanism).
func (pa *PredictionActor) getModel(modelID string) []byte {
	model, ok := pa.models[modelID]
	if !ok {
		return nil
	}
	return model
}

// makePrediction runs inference with the given model bytes.
// Placeholder implementation: the model is ignored and a fixed result with
// fixed confidence is returned until real prediction logic lands.
func (pa *PredictionActor) makePrediction(model []byte) (map[string]float32, float32) {
	return map[string]float32{"result": 0.75}, 0.8
}
