package actors

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/google/uuid"
	"github.com/sirupsen/logrus"

	"xagent/internal/knowledge"
	"xagent/internal/memory"
	"xagent/internal/monitoring"
	"xagent/internal/providers"
	"xagent/internal/resource"
	"xagent/internal/types"
	proto "xagent/proto/v1"
)

// AIAgent extends BaseAgent with AI-specific capabilities
type AIAgent struct {
	*BaseAgent
	llmProvider     providers.LLMProvider
	knowledgeBase   *knowledge.RAGEngine
	scheduler       types.TaskScheduler
	resourceManager *resource.Manager
	maxConcurrent   int
	mutex           sync.RWMutex
	memory          memory.Manager
	currentRole     types.Role
}

// NewAIAgent creates a new AI agent
func NewAIAgent(name string, description string) *AIAgent {
	return &AIAgent{
		BaseAgent:     NewBaseAgent(name, description),
		maxConcurrent: 5,
	}
}

// SetLLMProvider sets the LLM provider
func (a *AIAgent) SetLLMProvider(provider providers.LLMProvider) {
	a.mutex.Lock()
	defer a.mutex.Unlock()
	a.llmProvider = provider
}

// SetKnowledgeBase sets the knowledge base
func (a *AIAgent) SetKnowledgeBase(kb *knowledge.RAGEngine) {
	a.mutex.Lock()
	defer a.mutex.Unlock()
	a.knowledgeBase = kb
}

// SetScheduler sets the task scheduler
func (a *AIAgent) SetScheduler(scheduler types.TaskScheduler) {
	a.mutex.Lock()
	defer a.mutex.Unlock()
	a.scheduler = scheduler
}

// SetResourceManager sets the resource manager
func (a *AIAgent) SetResourceManager(rm *resource.Manager) {
	a.mutex.Lock()
	defer a.mutex.Unlock()
	a.resourceManager = rm
}

// Initialize initializes the agent with all required dependencies
func (a *AIAgent) Initialize() error {
	if a.llmProvider == nil {
		return fmt.Errorf("LLM provider not set")
	}
	if a.knowledgeBase == nil {
		return fmt.Errorf("knowledge base not set")
	}
	if a.scheduler == nil {
		return fmt.Errorf("scheduler not set")
	}
	if a.resourceManager == nil {
		return fmt.Errorf("resource manager not set")
	}
	return nil
}

// ProcessTask processes a task with AI capabilities
func (a *AIAgent) ProcessTask(task *proto.Task) error {
	metrics := monitoring.GetMetricsRegistry()
	startTime := time.Now()

	a.logger.WithFields(logrus.Fields{
		"task_id": task.Id,
		"type":    task.Name,
	}).Info("AI agent processing task")

	// Record task start
	metrics.TasksQueued.WithLabelValues(task.Name).Inc()

	// Check if we can acquire resources
	if !a.resourceManager.AcquireResource(task.Id) {
		// Schedule task for later execution
		if err := a.scheduler.ScheduleTask(context.Background(), task); err != nil {
			return fmt.Errorf("failed to schedule task: %w", err)
		}
		metrics.TasksQueued.WithLabelValues(task.Name).Dec()
		return fmt.Errorf("task scheduled for later execution due to resource constraints")
	}

	defer func() {
		a.resourceManager.ReleaseResource(task.Id)
		metrics.TasksQueued.WithLabelValues(task.Name).Dec()
		metrics.TaskDuration.WithLabelValues(a.GetName(), task.Name).Observe(time.Since(startTime).Seconds())
	}()

	// Execute task using base implementation
	if err := a.BaseAgent.ProcessTask(task); err != nil {
		metrics.TaskErrors.WithLabelValues(a.GetName(), task.Name, err.Error()).Inc()
		return fmt.Errorf("base processing failed: %w", err)
	}

	metrics.TasksProcessed.WithLabelValues(a.GetName(), "success").Inc()
	return nil
}

// HandleMessage overrides BaseAgent's HandleMessage to add AI-specific processing
func (a *AIAgent) HandleMessage(msg *proto.AgentMessage) error {
	a.logger.WithFields(logrus.Fields{
		"message_id": msg.Id,
		"type":       msg.Type,
	}).Debug("AI agent handling message")

	// Process with base implementation first
	if err := a.BaseAgent.HandleMessage(msg); err != nil {
		return fmt.Errorf("base message handling failed: %w", err)
	}

	// AI-specific message handling
	switch msg.Type {
	case proto.MessageType_MESSAGE_TYPE_REQUEST:
		return a.handleAIRequest(msg)
	case proto.MessageType_MESSAGE_TYPE_RESPONSE:
		return a.handleAIResponse(msg)
	}

	return nil
}

// handleAIRequest handles AI-specific request messages
func (a *AIAgent) handleAIRequest(msg *proto.AgentMessage) error {
	if a.llmProvider == nil {
		return fmt.Errorf("no LLM provider configured")
	}

	content := ""
	switch c := msg.Content.(type) {
	case *proto.AgentMessage_Message:
		content = c.Message
	case *proto.AgentMessage_Prediction:
		content = c.Prediction.String()
	default:
		return fmt.Errorf("unsupported message content type")
	}

	response, err := a.ProcessWithAI(context.Background(), content)
	if err != nil {
		return fmt.Errorf("AI processing failed: %w", err)
	}

	// Send response back
	if a.context != nil && a.context.Sender() != nil {
		responseMsg := &proto.AgentMessage{
			Id:   uuid.NewString(),
			Type: proto.MessageType_MESSAGE_TYPE_RESPONSE,
			Content: &proto.AgentMessage_Message{
				Message: response,
			},
			Sender: a.GetRole(),
		}
		a.context.Send(a.context.Sender(), responseMsg)
	}

	return nil
}

// handleAIResponse handles AI-specific response messages
func (a *AIAgent) handleAIResponse(msg *proto.AgentMessage) error {
	// Store response in knowledge base if available
	if a.knowledgeBase != nil {
		// Log the response for now
		a.logger.WithFields(logrus.Fields{
			"message_id": msg.Id,
			"type":       msg.Type,
		}).Debug("Response received")
	}
	return nil
}

// ProcessWithAI processes input using AI capabilities
func (a *AIAgent) ProcessWithAI(ctx context.Context, input string) (string, error) {
	metrics := monitoring.GetMetricsRegistry()
	startTime := time.Now()

	if a.llmProvider == nil {
		metrics.LLMRequestErrors.WithLabelValues("unknown", "unknown", "provider_not_configured").Inc()
		return "", fmt.Errorf("no LLM provider configured")
	}

	a.logger.WithField("input", input).Debug("Processing with AI")

	// Step 1: Retrieve relevant knowledge
	var relevantKnowledge string
	if a.knowledgeBase != nil {
		searchStart := time.Now()
		matches, err := a.knowledgeBase.Search(ctx, input, 5)
		if err != nil {
			metrics.MemoryErrors.WithLabelValues("search", err.Error()).Inc()
			a.logger.WithError(err).Warn("Failed to search knowledge base")
		} else {
			metrics.MemoryLatency.WithLabelValues("search").Observe(time.Since(searchStart).Seconds())
			// Combine relevant knowledge
			for _, match := range matches {
				relevantKnowledge += match.Document.Content + "\n"
			}
		}
	}

	// Step 2: Generate prompt
	prompt := a.generatePrompt(input, relevantKnowledge)

	// Step 3: Process with LLM
	options := &providers.CompletionOptions{
		MaxTokens:   1000,
		Temperature: 0.7,
	}

	llmStart := time.Now()
	response, err := a.llmProvider.Complete(ctx, prompt, options)
	llmDuration := time.Since(llmStart)

	if err != nil {
		metrics.LLMRequestErrors.WithLabelValues(a.llmProvider.GetName(), options.Model, err.Error()).Inc()
		return "", fmt.Errorf("LLM processing failed: %w", err)
	}

	metrics.LLMLatency.WithLabelValues(a.llmProvider.GetName(), options.Model, "complete").Observe(llmDuration.Seconds())
	metrics.LLMRequests.WithLabelValues(a.llmProvider.GetName(), options.Model).Inc()

	// Step 4: Store in memory
	if err := a.storeInteraction(ctx, input, response); err != nil {
		metrics.MemoryErrors.WithLabelValues("store", err.Error()).Inc()
		a.logger.WithError(err).Warn("Failed to store interaction")
	}

	metrics.AgentLatency.WithLabelValues(a.GetName(), "process_with_ai").Observe(time.Since(startTime).Seconds())
	return response, nil
}

// generatePrompt generates a prompt for the LLM
func (a *AIAgent) generatePrompt(input, knowledge string) string {
	var prompt strings.Builder

	// Add role context
	prompt.WriteString(fmt.Sprintf("You are %s, a %s.\n\n", a.GetName(), a.GetDescription()))

	// Add relevant knowledge if available
	if knowledge != "" {
		prompt.WriteString("Relevant knowledge:\n")
		prompt.WriteString(knowledge)
		prompt.WriteString("\n\n")
	}

	// Add task input
	prompt.WriteString("Task:\n")
	prompt.WriteString(input)
	prompt.WriteString("\n\nResponse:")

	return prompt.String()
}

// storeInteraction stores the interaction in memory
func (a *AIAgent) storeInteraction(ctx context.Context, input, response string) error {
	if a.memory == nil {
		return fmt.Errorf("memory manager not configured")
	}

	entry := &memory.Entry{
		ID:      uuid.NewString(),
		Type:    types.MemoryTypeInteraction,
		Content: response,
		Metadata: map[string]interface{}{
			"input":     input,
			"timestamp": time.Now(),
			"agent":     a.GetName(),
			"role":      a.GetRole(),
		},
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}

	return a.memory.Store(ctx, entry)
}

// AssignRole assigns a new role to the agent
func (a *AIAgent) AssignRole(role types.Role) error {
	if a.currentRole != nil {
		if err := a.currentRole.OnRevoke(a); err != nil {
			return err
		}
	}

	if err := role.OnAssign(a); err != nil {
		return err
	}

	a.currentRole = role
	return nil
}

// GetCurrentRole returns the agent's current role
func (a *AIAgent) GetCurrentRole() types.Role {
	return a.currentRole
}
