package ai

import (
	"context"
	"errors"
	"fmt"
	"math"
	"strings"
	"sync"
	"time"
	"xagent/internal/memory"
	"xagent/internal/messages"
	proto "xagent/proto/v1"
)

// PredictionMetrics tracks prediction performance
type PredictionMetrics struct {
	PredictionsMade int       // total number of predictions produced so far
	AccuracyRate    float64   // running fraction of predictions reported successful (see updateMetrics)
	ConfidenceLevel float64   // NOTE(review): never written in this file — confirm it is populated elsewhere
	LastUpdate      time.Time // last time the background loop refreshed the trend models
}

// PredictionResult represents the outcome prediction for a task
type PredictionResult struct {
	TaskID             string             // ID of the task the prediction applies to
	SuccessProbability float64            // estimated success probability, in [0, 1]
	Confidence         float64            // confidence in the prediction, in [0, 1]
	SampleSize         int                // number of similar historical tasks the prediction is based on
	PredictedAt        time.Time          // when the prediction was computed
	EstimatedDuration  time.Duration      // predicted completion time
	RequiredResources  map[string]float64 // predicted usage per "*_usage" metric; values expected in [0, 1]
}

// PredictionManager manages outcome predictions.
// All mutable state (metrics, historicalData, trendModels) is guarded by mutex.
type PredictionManager struct {
	memoryManager  memory.MemoryManager   // source of similar-task memories
	metrics        *PredictionMetrics     // running performance counters; guarded by mutex
	historicalData map[string][]float64   // raw samples keyed by metric name; guarded by mutex
	trendModels    map[string]*TrendModel // fitted trend models keyed by metric name; guarded by mutex
	mutex          sync.RWMutex           // guards the maps and metrics above
	updateInterval time.Duration          // period of the background prediction loop
}

// TrendModel represents a statistical model for trend analysis
// over exponentially weighted observations.
type TrendModel struct {
	Values     []float64   // observed samples, oldest first
	Weights    []float64   // exponential weights; the newest sample has weight 1
	Timestamps []time.Time // sample times; NOTE(review): not maintained by updateModel, may drift from Values
	Alpha      float64     // Exponential smoothing factor
}

// NewPredictionManager creates a new prediction manager backed by the given
// memory manager, with empty historical data and a 5-minute update interval.
func NewPredictionManager(memoryManager memory.MemoryManager) *PredictionManager {
	pm := &PredictionManager{
		memoryManager:  memoryManager,
		metrics:        new(PredictionMetrics),
		historicalData: map[string][]float64{},
		trendModels:    map[string]*TrendModel{},
		updateInterval: 5 * time.Minute,
	}
	return pm
}

// PredictTaskCompletion predicts task completion time and success probability
// for the given task, based on historical outcomes of similar tasks.
//
// It returns ErrNoHistoricalData when no similar tasks are found, and an
// error when the task is nil or the computed prediction fails validation.
func (pm *PredictionManager) PredictTaskCompletion(ctx context.Context, task *proto.Task) (*PredictionResult, error) {
	if task == nil {
		return nil, errors.New("task cannot be nil")
	}

	// Gather historical evidence: outcomes of tasks resembling this one.
	history, err := pm.findSimilarTasks(task)
	if err != nil {
		return nil, fmt.Errorf("failed to find similar tasks: %w", err)
	}
	if len(history) == 0 {
		return nil, ErrNoHistoricalData
	}

	// Derive each component of the prediction from the sample.
	result := &PredictionResult{
		TaskID:             task.Id,
		SuccessProbability: pm.predictSuccess(history),
		EstimatedDuration:  pm.predictDuration(history),
		RequiredResources:  pm.predictResources(history),
		Confidence:         pm.calculateConfidence(len(history)),
		SampleSize:         len(history),
		PredictedAt:        time.Now(),
	}

	// Reject out-of-range probabilities, durations, or resource values.
	if err := pm.validatePrediction(result); err != nil {
		return nil, fmt.Errorf("invalid prediction result: %w", err)
	}

	// Record that a prediction was produced.
	pm.updateMetrics(true)

	return result, nil
}

// AnalyzeTrends analyzes historical trends for pattern recognition. It
// lazily creates and refreshes the trend model for the metric, then reports
// the fitted trend slope, a seasonality estimate, a 5-point forecast, and a
// model-fit confidence score.
//
// The entire read-modify-write of the model runs under the write lock: the
// previous check-then-insert could race with a concurrent caller (one
// caller's freshly created model was silently overwritten), and mutating
// the model outside the lock raced with updatePredictions.
func (pm *PredictionManager) AnalyzeTrends(ctx context.Context, metric string, timeframe time.Duration) (*TrendAnalysis, error) {
	// Fetch the data before taking the write lock: getHistoricalData takes
	// its own read lock on the same (non-reentrant) mutex.
	data, err := pm.getHistoricalData(metric, timeframe)
	if err != nil {
		return nil, err
	}

	pm.mutex.Lock()
	model, exists := pm.trendModels[metric]
	if !exists {
		model = &TrendModel{
			Alpha: 0.2, // Default smoothing factor
		}
		pm.trendModels[metric] = model
	}

	// Update the model and compute all derived quantities while still
	// holding the lock, so no other goroutine sees a half-updated model.
	model.updateModel(data)
	trend := model.calculateTrend()
	seasonality := model.calculateSeasonality()
	forecast := model.forecast(5) // Predict next 5 points
	confidence := model.calculateConfidence()
	pm.mutex.Unlock()

	return &TrendAnalysis{
		Metric:      metric,
		Trend:       trend,
		Seasonality: seasonality,
		Forecast:    forecast,
		Confidence:  confidence,
		Timestamp:   time.Now(),
	}, nil
}

// PredictResourceUsage predicts future resource requirements over the given
// timeframe from the recorded CPU and memory usage samples.
func (pm *PredictionManager) PredictResourceUsage(ctx context.Context, timeframe time.Duration) (*ResourcePrediction, error) {
	cpuHistory, err := pm.getHistoricalData("cpu_usage", timeframe)
	if err != nil {
		return nil, err
	}
	memHistory, err := pm.getHistoricalData("memory_usage", timeframe)
	if err != nil {
		return nil, err
	}

	// Confidence is derived from the CPU sample size alone.
	return &ResourcePrediction{
		CPUPrediction:    pm.predictResourceMetric(ctx, "cpu_usage", cpuHistory),
		MemoryPrediction: pm.predictResourceMetric(ctx, "memory_usage", memHistory),
		TimeRange:        timeframe,
		Confidence:       pm.calculateConfidence(len(cpuHistory)),
		Timestamp:        time.Now(),
	}, nil
}

// StartPredictionLoop starts periodic prediction updates in a background
// goroutine. The loop refreshes the trend models every updateInterval and
// exits when ctx is cancelled.
func (pm *PredictionManager) StartPredictionLoop(ctx context.Context) {
	go func() {
		ticker := time.NewTicker(pm.updateInterval)
		defer ticker.Stop()

		for {
			select {
			case <-ticker.C:
				pm.updatePredictions(ctx)
			case <-ctx.Done():
				// Shutdown requested: stop the loop.
				return
			}
		}
	}()
}

// Helper functions

// findSimilarTasks looks up memory entries matching the task's description
// and tags, returning up to 10 entries above a 0.7 relevance threshold.
//
// NOTE(review): this uses context.Background() because the caller's ctx is
// not plumbed through; thread the request context here once this helper's
// signature can change together with its callers.
func (pm *PredictionManager) findSimilarTasks(task *proto.Task) ([]*memory.MemoryEntry, error) {
	// Build the relevance query from the description plus all tags.
	// strings.Builder avoids the quadratic cost of += in a loop.
	var query strings.Builder
	query.WriteString(task.Description)
	for _, tag := range task.Tags {
		query.WriteByte(' ')
		query.WriteString(tag)
	}

	ctx := context.Background()
	return pm.memoryManager.GetRelevantMemories(ctx, query.String(), 10, 0.7)
}

// predictDuration estimates completion time as the mean duration of the
// similar tasks that actually recorded a "duration" in their metadata.
//
// Entries without a duration are excluded from the average: the original
// divided the total by the full task count, so entries with missing
// metadata contributed zero and dragged the estimate toward zero. Falls
// back to one hour when no usable samples exist.
func (pm *PredictionManager) predictDuration(tasks []*memory.MemoryEntry) time.Duration {
	var total time.Duration
	samples := 0
	for _, task := range tasks {
		metadata := task.GetMetadata()
		if duration, ok := metadata["duration"].(time.Duration); ok {
			total += duration
			samples++
		}
	}

	if samples == 0 {
		return time.Hour // Default prediction when no durations are recorded
	}
	return total / time.Duration(samples)
}

// predictSuccess estimates the success probability as the success rate among
// similar tasks that recorded a boolean "success" outcome.
//
// Entries without a recorded outcome are excluded: the original counted them
// in the denominator, implicitly treating missing metadata as failure and
// biasing the probability downward. Falls back to 0.5 when no outcomes exist.
func (pm *PredictionManager) predictSuccess(tasks []*memory.MemoryEntry) float64 {
	successes, observed := 0, 0
	for _, task := range tasks {
		metadata := task.GetMetadata()
		if success, ok := metadata["success"].(bool); ok {
			observed++
			if success {
				successes++
			}
		}
	}

	if observed == 0 {
		return 0.5 // Default 50% probability with no recorded outcomes
	}
	return float64(successes) / float64(observed)
}

// predictResources averages every "*_usage" metadata value across the sample
// of similar tasks and returns the per-metric mean as the resource estimate.
// The divisor is the full sample size, so entries missing a metric pull its
// average down (matching the original behavior).
func (pm *PredictionManager) predictResources(tasks []*memory.MemoryEntry) map[string]float64 {
	resources := make(map[string]float64)
	n := len(tasks)
	if n == 0 {
		return resources
	}

	// Accumulate usage totals keyed by metric name.
	for _, entry := range tasks {
		for key, raw := range entry.GetMetadata() {
			if !strings.HasSuffix(key, "_usage") {
				continue
			}
			if usage, ok := raw.(float64); ok {
				resources[key] += usage
			}
		}
	}

	// Convert totals to averages over the whole sample.
	for key, total := range resources {
		resources[key] = total / float64(n)
	}

	return resources
}

// calculateConfidence maps a sample size to a confidence score: linear in
// the sample size (capped at 0.95, reached at 20 samples), floored at 0.1.
//
// The original returned 0.1 for an empty sample but less than 0.1 for small
// non-zero samples (e.g. 1/20 = 0.05), so the documented minimum confidence
// was not actually enforced; the floor now applies uniformly.
func (pm *PredictionManager) calculateConfidence(sampleSize int) float64 {
	if sampleSize <= 0 {
		return 0.1 // Minimum confidence
	}
	scaled := float64(sampleSize) / 20.0
	return math.Min(0.95, math.Max(0.1, scaled))
}

// getHistoricalData returns a copy of the recorded samples for metric, or
// an error when the metric has no data.
//
// A copy is returned so callers can read (or pass around) the slice after
// the lock is released without aliasing the manager's internal storage,
// which other goroutines may reslice.
//
// NOTE(review): timeframe is currently ignored — samples are not windowed
// by age here (cleanupHistoricalData handles expiry separately). Confirm
// whether callers expect time-filtered data.
func (pm *PredictionManager) getHistoricalData(metric string, timeframe time.Duration) ([]float64, error) {
	pm.mutex.RLock()
	defer pm.mutex.RUnlock()

	data, exists := pm.historicalData[metric]
	if !exists {
		return nil, fmt.Errorf("no historical data for metric: %s", metric)
	}
	return append([]float64(nil), data...), nil
}

// updatePredictions refreshes every registered trend model from the current
// historical data and stamps LastUpdate. Called periodically by the
// prediction loop; ctx is accepted for interface symmetry but unused.
//
// Deadlock fix: the original called getHistoricalData — which takes RLock —
// while already holding the write lock. sync.RWMutex is not reentrant, so
// that self-deadlocked on the first tick. Read the map directly instead.
func (pm *PredictionManager) updatePredictions(ctx context.Context) {
	pm.mutex.Lock()
	defer pm.mutex.Unlock()

	for metric, model := range pm.trendModels {
		if data, exists := pm.historicalData[metric]; exists {
			model.updateModel(data)
		}
	}

	pm.metrics.LastUpdate = time.Now()
}

// updateMetrics records that a prediction was made and folds the outcome
// into the running accuracy rate (incremental mean of successful outcomes).
//
// Takes the write lock: callers (e.g. PredictTaskCompletion) invoke this
// without holding pm.mutex, and the metrics are read concurrently by
// GetMetrics and written by updatePredictions — the original unlocked
// mutation was a data race.
func (pm *PredictionManager) updateMetrics(success bool) {
	pm.mutex.Lock()
	defer pm.mutex.Unlock()

	pm.metrics.PredictionsMade++
	if success {
		total := float64(pm.metrics.PredictionsMade)
		current := pm.metrics.AccuracyRate * (total - 1)
		pm.metrics.AccuracyRate = (current + 1) / total
	}
}

// Model methods

// updateModel replaces the model's observations with a copy of data and
// recomputes the exponential weights: the newest sample gets weight 1 and
// each older sample decays by a factor of (1 - Alpha) per step.
//
// The copy prevents the model from aliasing the caller's slice (typically
// pm.historicalData), which other code reslices during cleanup.
//
// NOTE(review): Timestamps is not maintained here, so it can drift out of
// sync with Values/Weights — consumers must not assume equal lengths.
func (tm *TrendModel) updateModel(data []float64) {
	tm.Values = append([]float64(nil), data...)
	tm.Weights = make([]float64, len(data))

	// Calculate exponential weights, most recent point weighted highest.
	for i := range tm.Weights {
		tm.Weights[i] = math.Pow(1-tm.Alpha, float64(len(data)-i-1))
	}
}

// calculateTrend fits a weighted least-squares line through the model's
// values (x = sample index) and returns its slope. Returns 0 with fewer
// than two points, mismatched weight/value lengths, or a degenerate
// denominator.
//
// The original omitted the total-weight term from the normal equations,
// which is only valid when the weights sum to 1; the exponential weights
// produced by updateModel do not, so the slope was biased. This uses the
// standard weighted form:
//
//	slope = (Σw·Σwxy − Σwx·Σwy) / (Σw·Σwx² − (Σwx)²)
func (tm *TrendModel) calculateTrend() float64 {
	if len(tm.Values) < 2 || len(tm.Weights) != len(tm.Values) {
		return 0
	}

	var sumW, sumX, sumY, sumXY, sumXX float64
	for i, y := range tm.Values {
		x := float64(i)
		w := tm.Weights[i]
		sumW += w
		sumX += x * w
		sumY += y * w
		sumXY += x * y * w
		sumXX += x * x * w
	}

	denom := sumW*sumXX - sumX*sumX
	if denom == 0 {
		// Degenerate weights (e.g. Alpha == 1) would otherwise yield NaN/Inf.
		return 0
	}
	return (sumW*sumXY - sumX*sumY) / denom
}

// calculateSeasonality estimates seasonality as the mean absolute deviation
// of the observations from a straight line anchored at the first value with
// the model's current trend slope. Returns 0 with fewer than two points.
func (tm *TrendModel) calculateSeasonality() float64 {
	n := len(tm.Values)
	if n < 2 {
		return 0
	}

	slope := tm.calculateTrend()
	base := tm.Values[0]

	// Accumulate the absolute residuals around the fitted line.
	var totalDeviation float64
	for i, observed := range tm.Values {
		fitted := base + slope*float64(i)
		totalDeviation += math.Abs(observed - fitted)
	}

	return totalDeviation / float64(n)
}

// forecast projects the requested number of future points by extending the
// last observed value with the model's trend slope plus a sinusoidal
// seasonality term. Returns nil when the model holds no data.
func (tm *TrendModel) forecast(points int) []float64 {
	if len(tm.Values) == 0 {
		return nil
	}

	slope := tm.calculateTrend()
	season := tm.calculateSeasonality()
	base := tm.Values[len(tm.Values)-1]

	out := make([]float64, points)
	for step := range out {
		// Linear extrapolation plus a crude periodic component.
		out[step] = base + slope*float64(step+1) + season*math.Sin(float64(step))
	}

	return out
}

// calculateConfidence scores the model fit as the R-squared of the trend
// line (anchored at the first observed value), clamped to [0, 1]. Returns 0
// with fewer than two points or zero total variance.
//
// R-squared is negative when the fit is worse than the mean; downstream
// consumers treat this value as a probability-like confidence, so negative
// values are clamped to 0 (the original could return them unclamped).
func (tm *TrendModel) calculateConfidence() float64 {
	if len(tm.Values) < 2 {
		return 0
	}

	trend := tm.calculateTrend()

	// Mean of the observations, for the total sum of squares.
	mean := 0.0
	for _, y := range tm.Values {
		mean += y
	}
	mean /= float64(len(tm.Values))

	var ssRes, ssTot float64
	for i, y := range tm.Values {
		predicted := float64(i)*trend + tm.Values[0]
		ssRes += math.Pow(y-predicted, 2)
		ssTot += math.Pow(y-mean, 2)
	}

	if ssTot == 0 {
		return 0
	}
	r2 := 1 - (ssRes / ssTot)
	if r2 < 0 {
		return 0
	}
	return r2
}

// Types for prediction results

// TrendAnalysis is the result of AnalyzeTrends for a single metric.
type TrendAnalysis struct {
	Metric      string    // name of the analyzed metric
	Trend       float64   // fitted trend slope
	Seasonality float64   // mean absolute deviation from the fitted line
	Forecast    []float64 // projected next values (5 points)
	Confidence  float64   // model-fit score (R-squared based)
	Timestamp   time.Time // when the analysis was produced
}

// ResourcePrediction is the result of PredictResourceUsage.
type ResourcePrediction struct {
	CPUPrediction    float64       // projected CPU usage, clamped to [0, 1]
	MemoryPrediction float64       // projected memory usage, clamped to [0, 1]
	TimeRange        time.Duration // timeframe the prediction covers
	Confidence       float64       // confidence derived from the CPU sample size
	Timestamp        time.Time     // when the prediction was computed
}

// GetMetrics returns a snapshot of the current prediction metrics.
//
// A copy is returned: the original handed out the live pointer, so callers
// could read fields concurrently with updateMetrics after the lock was
// released, making the RLock here ineffective.
func (pm *PredictionManager) GetMetrics() *PredictionMetrics {
	pm.mutex.RLock()
	defer pm.mutex.RUnlock()
	snapshot := *pm.metrics
	return &snapshot
}

// predictResourceMetric projects the next value of a resource metric from
// its history using the moving average plus a first-to-last linear trend,
// clamped to [0, 1]. Returns 0 when fewer than two samples exist. ctx and
// metric are currently unused but kept for interface stability.
func (pm *PredictionManager) predictResourceMetric(ctx context.Context, metric string, history []float64) float64 {
	n := len(history)
	if n < 2 {
		return 0
	}

	// Moving average over the whole window.
	var sum float64
	for _, v := range history {
		sum += v
	}
	mean := sum / float64(n)

	// Overall slope across the observed window.
	slope := (history[n-1] - history[0]) / float64(n-1)

	// Resource metrics are ratios, so clamp the projection into [0, 1].
	return math.Max(0, math.Min(1, mean+slope))
}

// Sentinel errors for prediction failures; compare with errors.Is.
var (
	ErrInvalidTaskMetadata = errors.New("invalid task metadata") // malformed metadata on a task entry
	ErrInvalidSampleSize   = errors.New("invalid sample size")   // negative or otherwise unusable sample size
	ErrInvalidPrediction   = errors.New("invalid prediction")    // prediction failed validation
	ErrNoHistoricalData    = errors.New("no historical data available") // no similar tasks to base a prediction on
)

// validatePrediction checks that a computed prediction's fields are within
// their legal ranges: probability and confidence in [0, 1], non-negative
// duration and sample size, and every resource usage value in [0, 1].
func (pm *PredictionManager) validatePrediction(prediction *PredictionResult) error {
	if prediction == nil {
		return errors.New("prediction cannot be nil")
	}

	if prediction.SuccessProbability < 0 || prediction.SuccessProbability > 1 {
		return errors.New("invalid success probability")
	}

	if prediction.Confidence < 0 || prediction.Confidence > 1 {
		return errors.New("invalid confidence value")
	}

	if prediction.EstimatedDuration < 0 {
		return errors.New("invalid duration")
	}

	// Previously unchecked here even though ValidateProto rejects it; uses
	// the (formerly unused) sentinel so callers can errors.Is on it.
	if prediction.SampleSize < 0 {
		return ErrInvalidSampleSize
	}

	// Ranging over a nil map is a no-op, so no nil guard is needed.
	for metric, value := range prediction.RequiredResources {
		if value < 0 || value > 1 {
			return fmt.Errorf("invalid resource usage value for %s", metric)
		}
	}

	return nil
}

// GetAction returns the fixed action identifier for prediction messages.
func (pr *PredictionResult) GetAction() string {
	const action = "prediction"
	return action
}

// Validate checks the minimal invariants of a prediction result: a
// non-empty task ID and a success probability within [0, 1].
func (pr *PredictionResult) Validate() error {
	switch {
	case pr.TaskID == "":
		return errors.New("task ID cannot be empty")
	case pr.SuccessProbability < 0 || pr.SuccessProbability > 1:
		return errors.New("invalid success probability")
	default:
		return nil
	}
}

// cleanupHistoricalData drops samples older than maxAge for every metric
// that has timestamped data in its trend model.
//
// Fixes versus the original: (1) when every sample was stale, idx stayed 0
// and nothing was trimmed — now all samples are dropped in that case;
// (2) the parallel slices (historicalData, Values, Weights, Timestamps) can
// have different lengths because updateModel does not maintain Timestamps,
// so each trim is clamped to its own slice length instead of risking an
// out-of-range slice panic.
func (pm *PredictionManager) cleanupHistoricalData(maxAge time.Duration) {
	pm.mutex.Lock()
	defer pm.mutex.Unlock()

	cutoff := time.Now().Add(-maxAge)

	for metric, data := range pm.historicalData {
		model, exists := pm.trendModels[metric]
		if !exists || len(model.Timestamps) == 0 {
			// No timestamps recorded for this metric; nothing to age out.
			continue
		}

		// Index of the first sample still within maxAge. If no sample is
		// recent enough, everything is stale and gets trimmed.
		idx := len(model.Timestamps)
		for i, ts := range model.Timestamps {
			if ts.After(cutoff) {
				idx = i
				break
			}
		}
		if idx == 0 {
			continue
		}

		if idx <= len(data) {
			pm.historicalData[metric] = data[idx:]
		}
		if idx <= len(model.Values) {
			model.Values = model.Values[idx:]
		}
		if idx <= len(model.Weights) {
			model.Weights = model.Weights[idx:]
		}
		model.Timestamps = model.Timestamps[idx:]
	}
}

// GetType identifies this message as a prediction, satisfying the
// messages.Message interface.
func (pr *PredictionResult) GetType() messages.MessageType {
	t := messages.TypePrediction
	return t
}

// GetContent renders a human-readable one-line summary of the prediction.
func (pr *PredictionResult) GetContent() string {
	pct := pr.SuccessProbability * 100
	return fmt.Sprintf("Task %s prediction: %.2f%% success probability", pr.TaskID, pct)
}

// GetMetadata exposes the prediction's fields as a generic metadata map,
// with the timestamp in RFC 3339 and the duration rendered as a string.
func (pr *PredictionResult) GetMetadata() map[string]interface{} {
	meta := make(map[string]interface{}, 7)
	meta["taskId"] = pr.TaskID
	meta["successProbability"] = pr.SuccessProbability
	meta["confidence"] = pr.Confidence
	meta["sampleSize"] = pr.SampleSize
	meta["predictedAt"] = pr.PredictedAt.Format(time.RFC3339)
	meta["estimatedDuration"] = pr.EstimatedDuration.String()
	meta["requiredResources"] = pr.RequiredResources
	return meta
}

// ToProto converts the prediction into an AgentMessage proto, encoding the
// timestamp as RFC 3339 and the duration in nanoseconds.
func (pr *PredictionResult) ToProto() *proto.AgentMessage {
	payload := &proto.PredictionResult{
		TaskId:             pr.TaskID,
		SuccessProbability: pr.SuccessProbability,
		Confidence:         pr.Confidence,
		SampleSize:         int32(pr.SampleSize),
		PredictedAt:        pr.PredictedAt.Format(time.RFC3339),
		EstimatedDuration:  pr.EstimatedDuration.Nanoseconds(),
		RequiredResources:  pr.RequiredResources,
	}
	return &proto.AgentMessage{
		Type:    proto.MessageType_MESSAGE_TYPE_PREDICTION,
		Content: &proto.AgentMessage_Prediction{Prediction: payload},
	}
}

// PredictionResultFromProto converts a proto PredictionResult into the
// native type. Returns nil when msg is nil (the original dereferenced it
// unconditionally and would panic).
//
// A malformed PredictedAt timestamp is tolerated best-effort and leaves
// PredictedAt at the zero time rather than failing the conversion.
func PredictionResultFromProto(msg *proto.PredictionResult) *PredictionResult {
	if msg == nil {
		return nil
	}
	predictedAt, _ := time.Parse(time.RFC3339, msg.PredictedAt) // zero time on parse failure, by design
	return &PredictionResult{
		TaskID:             msg.TaskId,
		SuccessProbability: msg.SuccessProbability,
		Confidence:         msg.Confidence,
		SampleSize:         int(msg.SampleSize),
		PredictedAt:        predictedAt,
		EstimatedDuration:  time.Duration(msg.EstimatedDuration),
		RequiredResources:  msg.RequiredResources,
	}
}

// ValidateProto verifies every field invariant required before converting
// the result to its proto form: probability and confidence in [0, 1], and a
// non-negative sample size and duration.
func (pr *PredictionResult) ValidateProto() error {
	switch {
	case pr.TaskID == "":
		return errors.New("task ID cannot be empty")
	case pr.SuccessProbability < 0 || pr.SuccessProbability > 1:
		return errors.New("invalid success probability")
	case pr.Confidence < 0 || pr.Confidence > 1:
		return errors.New("invalid confidence value")
	case pr.SampleSize < 0:
		return errors.New("invalid sample size")
	case pr.EstimatedDuration < 0:
		return errors.New("invalid duration")
	}
	return nil
}

// createErrorResponse wraps err in an error-typed AgentMessage carrying the
// PREDICTION_ERROR code.
func (pm *PredictionManager) createErrorResponse(err error) *proto.AgentMessage {
	body := &proto.Error{
		Message: err.Error(),
		Code:    "PREDICTION_ERROR",
	}
	return &proto.AgentMessage{
		Type:    proto.MessageType_MESSAGE_TYPE_ERROR,
		Content: &proto.AgentMessage_Error{Error: body},
	}
}

// HandlePredictionRequest services a prediction request by building a task
// from the request parameters and running PredictTaskCompletion on it. On
// failure it returns both an error-typed AgentMessage (for the transport)
// and the underlying error.
func (pm *PredictionManager) HandlePredictionRequest(ctx context.Context, req *proto.PredictionRequest) (*proto.AgentMessage, error) {
	// Guard against a nil request: req.TaskId / req.Parameters below would
	// otherwise panic.
	if req == nil {
		err := errors.New("prediction request cannot be nil")
		return pm.createErrorResponse(err), err
	}

	// Create a task from the request parameters.
	task := &proto.Task{
		Id:          req.TaskId,
		Description: req.Parameters["description"],
	}

	result, err := pm.PredictTaskCompletion(ctx, task)
	if err != nil {
		return pm.createErrorResponse(err), err
	}
	return result.ToProto(), nil
}
