package actor

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/asynkron/protoactor-go/actor"
	"github.com/google/uuid"
	"github.com/sirupsen/logrus"

	"xagent/internal/knowledge"
	"xagent/internal/memory"
	"xagent/internal/providers"
	proto "xagent/proto/v1"
)

// RoleState represents the lifecycle state of a role as it processes tasks.
type RoleState int

// Role lifecycle states. A role normally cycles Idle -> Thinking ->
// Planning -> Executing -> Idle; Waiting and Finished are set by the
// actor lifecycle hooks (OnStopping / OnStopped).
const (
	RoleStateIdle      RoleState = iota // no work in progress
	RoleStateThinking                   // generating thoughts via the LLM (Think)
	RoleStatePlanning                   // building an execution plan (Plan)
	RoleStateExecuting                  // running plan steps (Execute / handleTask)
	RoleStateWaiting                    // actor is stopping (OnStopping)
	RoleStateFinished                   // actor has stopped (OnStopped)
)

// RoleContext represents the runtime context for a role: its mutable
// lifecycle state, current task, registered actions, capabilities,
// thought history, and memory store.
type RoleContext struct {
	env          *EnvironmentActor      // environment the role operates in (set via SetEnvironment)
	memory       memory.MemoryManager   // stores plans and other entries keyed by task ID
	state        RoleState              // current lifecycle state; guarded by mutex
	currentTask  *proto.Task            // task being processed; nil when idle
	actions      map[string]ActorAction // available actions keyed by action name; guarded by mutex
	capabilities []string               // capability tags; guarded by mutex
	thoughtChain *ActorThoughtChain     // accumulated thoughts produced by Think
	mutex        sync.RWMutex           // protects the mutable fields above
}

// ActorRole combines Actor and Role capabilities: it receives protoactor
// messages (Receive) and processes tasks through a Think/Plan/Execute
// pipeline backed by an LLM provider and a memory store.
type ActorRole struct {
	ID          string // unique instance ID (UUID assigned by NewActorRole)
	Name        string // human-readable name
	Role        string // role label, also used as message Sender
	Description string
	Profile     string

	context     actor.Context        // most recent actor context, captured in Receive
	rc          *RoleContext         // role runtime state (has its own mutex)
	llm         *providers.MockLLM   // LLM provider used by Think
	knowledge   *knowledge.RAGEngine // optional knowledge base (set via SetKnowledgeBase)
	supervisor  *actor.PID
	logger      *logrus.Logger
	messagePool *ActorMessagePool // observer pool fed by handleAgentMessage
	// NOTE(review): handleTask/Think/Plan/Execute lock this mutex when
	// mutating rc.state, while GetState/SetState lock rc.mutex — the two
	// locks do not synchronize with each other; confirm which one is
	// intended to guard rc.state.
	mutex sync.RWMutex
}

// NewActorRole constructs an ActorRole with a fresh UUID, an in-memory
// memory manager, a mock LLM provider, an empty action/capability set,
// and an idle initial state.
func NewActorRole(name, role, profile, description string) *ActorRole {
	runtime := &RoleContext{
		memory:       memory.NewInMemoryManager(),
		state:        RoleStateIdle,
		actions:      map[string]ActorAction{},
		capabilities: []string{},
		thoughtChain: &ActorThoughtChain{Thoughts: []ActorThought{}},
	}

	return &ActorRole{
		ID:          uuid.NewString(),
		Name:        name,
		Role:        role,
		Profile:     profile,
		Description: description,
		rc:          runtime,
		messagePool: NewActorMessagePool(),
		logger:      logrus.New(),
		llm:         providers.NewMockLLM(),
	}
}

// Receive captures the actor context and dispatches each incoming
// message — protoactor lifecycle events, tasks, and agent messages —
// to the matching handler. Unrecognized messages are ignored.
func (ar *ActorRole) Receive(c actor.Context) {
	ar.context = c

	switch m := c.Message().(type) {
	case *proto.Task:
		ar.handleTask(c, m)
	case *proto.AgentMessage:
		ar.handleAgentMessage(c, m)
	case *actor.Started:
		ar.OnStarted()
	case *actor.Restarting:
		ar.OnRestarting()
	case *actor.Stopping:
		ar.OnStopping()
	case *actor.Stopped:
		ar.OnStopped()
	}
}

// handleAgentMessage records an incoming agent message in the observer
// pool, then dispatches it by message type. Messages with an unknown
// type are pooled but otherwise dropped.
func (ar *ActorRole) handleAgentMessage(ctx actor.Context, msg *proto.AgentMessage) {
	// Make the message visible to observers before any handling.
	ar.messagePool.AddMessage(msg)

	switch msg.Type {
	case proto.MessageType_MESSAGE_TYPE_ERROR:
		ar.handleError(ctx, msg)
	case proto.MessageType_MESSAGE_TYPE_RESPONSE:
		ar.handleResponse(ctx, msg)
	case proto.MessageType_MESSAGE_TYPE_REQUEST:
		ar.handleRequest(ctx, msg)
	}
}

// handleRequest processes a request message. Only plain text payloads
// (AgentMessage_Message) are currently understood; other content types
// are ignored.
func (ar *ActorRole) handleRequest(ctx actor.Context, msg *proto.AgentMessage) {
	text, ok := msg.Content.(*proto.AgentMessage_Message)
	if !ok {
		return
	}
	fields := logrus.Fields{
		"sender":  msg.Sender,
		"content": text.Message,
	}
	ar.logger.WithFields(fields).Info("Received message")
}

// handleResponse logs an incoming response message; no further
// processing is performed.
func (ar *ActorRole) handleResponse(ctx actor.Context, msg *proto.AgentMessage) {
	fields := logrus.Fields{
		"sender": msg.Sender,
		"type":   msg.Type,
	}
	ar.logger.WithFields(fields).Info("Received response")
}

// handleError logs an incoming error message. Messages whose content
// is not an AgentMessage_Error payload are silently ignored.
func (ar *ActorRole) handleError(ctx actor.Context, msg *proto.AgentMessage) {
	errContent, ok := msg.Content.(*proto.AgentMessage_Error)
	if !ok {
		return
	}
	ar.logger.WithFields(logrus.Fields{
		"sender":  msg.Sender,
		"code":    errContent.Error.Code,
		"message": errContent.Error.Message,
	}).Error("Received error")
}

// handleTask processes a task using both actor and role capabilities:
// it thinks about the task, builds a plan, executes it, and reports the
// outcome (a success response or an error message) to the parent actor.
func (ar *ActorRole) handleTask(ctx actor.Context, task *proto.Task) {
	// Guard rc.state/rc.currentTask with the role-context mutex so that
	// GetState and other readers observe a consistent view. (The previous
	// version locked ar.mutex here while GetState locks ar.rc.mutex, so
	// the two never synchronized.)
	ar.rc.mutex.Lock()
	ar.rc.state = RoleStateExecuting
	ar.rc.currentTask = task
	ar.rc.mutex.Unlock()

	// Return to idle when the task is done, regardless of outcome.
	defer ar.SetState(RoleStateIdle)

	// Think about the task.
	initialThought, err := ar.Think(context.Background(), task.Description)
	if err != nil {
		ar.sendError(ctx, err)
		return
	}

	// Log initial thoughts.
	ar.logger.WithFields(logrus.Fields{
		"task":      task.Id,
		"thoughts":  initialThought.Content,
		"reasoning": initialThought.Reasoning,
		"plan":      initialThought.Plan,
	}).Info("Initial thoughts about task")

	// Use the thought to plan.
	if err := ar.Plan(context.Background()); err != nil {
		ar.sendError(ctx, err)
		return
	}

	// Execute the plan.
	if err := ar.Execute(context.Background()); err != nil {
		ar.sendError(ctx, err)
		return
	}

	// Report success to the parent. Sender is set for consistency with
	// sendError, so the parent can attribute the response to this role.
	ctx.Send(ctx.Parent(), &proto.AgentMessage{
		Id:   uuid.NewString(),
		Type: proto.MessageType_MESSAGE_TYPE_RESPONSE,
		Content: &proto.AgentMessage_Message{
			Message: fmt.Sprintf("Task completed successfully: %s", task.Id),
		},
		Sender: ar.Role,
		Metadata: map[string]string{
			"task_id": task.Id,
			"status":  "completed",
		},
	})
}

// Think generates a structured thought about input using the LLM,
// appends it to the role's thought chain, and returns it. On LLM
// failure it returns a nil thought and a wrapped error.
//
// NOTE(review): the Reasoning/Plan/Critical/NextAction fields are
// placeholder values — the LLM response is not actually parsed into
// the structured sections the prompt requests.
func (ar *ActorRole) Think(ctx context.Context, input string) (*ActorThought, error) {
	// Route state transitions through SetState so they are guarded by
	// ar.rc.mutex, consistent with GetState. (The previous version
	// locked ar.mutex instead, which readers never take.)
	ar.SetState(RoleStateThinking)
	defer ar.SetState(RoleStateIdle)

	// Build the thinking prompt from the role name and the input.
	prompt := fmt.Sprintf(`As a %s, analyze the following task and provide your thoughts:

Task: %s

Please structure your response as follows:
1. Initial thoughts and understanding
2. Reasoning process
3. Action plan (as a list of steps)
4. Potential issues or criticisms
5. Next immediate action to take

Consider your role's responsibilities and constraints while analyzing.`, ar.Role, input)

	// Ask the LLM for a completion.
	response, err := ar.llm.Complete(ctx, prompt, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to think: %w", err)
	}

	// Wrap the raw response in a thought with placeholder structure.
	thought := &ActorThought{
		Content:    response,
		Reasoning:  "Parsed reasoning",
		Plan:       []string{"Step 1", "Step 2"},
		Critical:   "Potential issues",
		NextAction: "Next step",
		Timestamp:  time.Now(),
	}

	// Record the thought for later inspection.
	ar.rc.thoughtChain.AddThought(thought)

	return thought, nil
}

// Plan builds an execution plan for the current task by asking the LLM
// (via Think) and stores the resulting plan in memory keyed by the task
// ID, where Execute later retrieves it. It fails if no task is set.
func (ar *ActorRole) Plan(ctx context.Context) error {
	// Route state transitions through SetState so they are guarded by
	// ar.rc.mutex, consistent with GetState. (The previous version
	// locked ar.mutex instead, which readers never take.)
	ar.SetState(RoleStatePlanning)
	defer ar.SetState(RoleStateIdle)

	// Snapshot the current task under the role-context lock; it is
	// written by handleTask from the actor goroutine.
	ar.rc.mutex.RLock()
	task := ar.rc.currentTask
	ar.rc.mutex.RUnlock()
	if task == nil {
		return fmt.Errorf("no task to plan")
	}

	// Generate the plan using the LLM.
	planThought, err := ar.Think(ctx, fmt.Sprintf("Plan execution for task: %s", task.Description))
	if err != nil {
		return fmt.Errorf("failed to generate plan: %w", err)
	}

	// Store the plan in memory under the task ID so Execute can find it.
	entry := &memory.MemoryEntry{
		ID:      task.Id,
		Type:    "plan",
		Content: planThought.Content,
		Metadata: map[string]interface{}{
			"plan":      planThought.Plan,
			"reasoning": planThought.Reasoning,
		},
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}

	if err := ar.rc.memory.Store(ctx, entry); err != nil {
		return fmt.Errorf("failed to store plan: %w", err)
	}

	return nil
}

// Execute retrieves the plan that Plan stored for the current task and
// runs each step in order via executeStep, stopping at the first
// failure. It fails if no task is set or the stored plan is malformed.
func (ar *ActorRole) Execute(ctx context.Context) error {
	// Route state transitions through SetState so they are guarded by
	// ar.rc.mutex, consistent with GetState. (The previous version
	// locked ar.mutex instead, which readers never take.)
	ar.SetState(RoleStateExecuting)
	defer ar.SetState(RoleStateIdle)

	// Snapshot the current task under the role-context lock; it is
	// written by handleTask from the actor goroutine.
	ar.rc.mutex.RLock()
	task := ar.rc.currentTask
	ar.rc.mutex.RUnlock()
	if task == nil {
		return fmt.Errorf("no task to execute")
	}

	// Fetch the plan stored by Plan under the task ID.
	entry, err := ar.rc.memory.Get(ctx, task.Id)
	if err != nil {
		return fmt.Errorf("failed to get plan: %w", err)
	}

	// The plan is stored as a []string in entry metadata by Plan.
	plan, ok := entry.Metadata["plan"].([]string)
	if !ok {
		return fmt.Errorf("invalid plan format")
	}

	// Execute each step; abort on the first failure.
	for _, step := range plan {
		if err := ar.executeStep(ctx, step); err != nil {
			return fmt.Errorf("failed to execute step: %w", err)
		}
	}

	return nil
}

// executeStep executes a single plan step: it asks the LLM (via Think)
// how to handle the step, resolves the suggested next action by name,
// and runs that action with the step text as input.
func (ar *ActorRole) executeStep(ctx context.Context, step string) error {
	// Think about how to execute this step.
	stepThought, err := ar.Think(ctx, fmt.Sprintf("Execute step: %s", step))
	if err != nil {
		return fmt.Errorf("failed to think about step execution: %w", err)
	}

	// Resolve the action through GetAction so the map lookup takes
	// ar.rc.mutex — the previous direct read of ar.rc.actions raced
	// with AddAction, which mutates the map under that lock.
	action, ok := ar.GetAction(stepThought.NextAction)
	if !ok {
		return fmt.Errorf("no action found for step: %s", step)
	}

	// Run the action; its result is currently discarded.
	if _, err := action.Run(ctx, step); err != nil {
		return fmt.Errorf("failed to execute action: %w", err)
	}

	return nil
}

// Helper methods

// sendError reports err to the parent actor as a typed error message,
// attributed to this role via the Sender field.
func (ar *ActorRole) sendError(ctx actor.Context, err error) {
	ctx.Send(ctx.Parent(), &proto.AgentMessage{
		Id:     uuid.NewString(),
		Type:   proto.MessageType_MESSAGE_TYPE_ERROR,
		Sender: ar.Role,
		Content: &proto.AgentMessage_Error{
			Error: &proto.Error{
				Code:    "INTERNAL_ERROR",
				Message: err.Error(),
			},
		},
	})
}

// State management

// GetState returns the role's current lifecycle state.
func (ar *ActorRole) GetState() RoleState {
	ar.rc.mutex.RLock()
	state := ar.rc.state
	ar.rc.mutex.RUnlock()
	return state
}

// SetState transitions the role to the given lifecycle state.
func (ar *ActorRole) SetState(state RoleState) {
	ar.rc.mutex.Lock()
	ar.rc.state = state
	ar.rc.mutex.Unlock()
}

// Action management

// AddAction registers an action under its own name, replacing any
// previously registered action with the same name.
func (ar *ActorRole) AddAction(action ActorAction) {
	ar.rc.mutex.Lock()
	ar.rc.actions[action.Name()] = action
	ar.rc.mutex.Unlock()
}

// GetAction looks up a registered action by name; the boolean reports
// whether an action with that name exists.
func (ar *ActorRole) GetAction(name string) (ActorAction, bool) {
	ar.rc.mutex.RLock()
	a, ok := ar.rc.actions[name]
	ar.rc.mutex.RUnlock()
	return a, ok
}

// Capability management

// AddCapability appends a capability tag to the role. Duplicate tags
// are not deduplicated.
func (ar *ActorRole) AddCapability(capability string) {
	ar.rc.mutex.Lock()
	ar.rc.capabilities = append(ar.rc.capabilities, capability)
	ar.rc.mutex.Unlock()
}

// GetCapabilities returns a copy of the role's capability list so
// callers cannot mutate the internal slice.
func (ar *ActorRole) GetCapabilities() []string {
	ar.rc.mutex.RLock()
	defer ar.rc.mutex.RUnlock()
	caps := make([]string, len(ar.rc.capabilities))
	copy(caps, ar.rc.capabilities)
	return caps
}

// Environment integration

// SetEnvironment attaches the role to an environment actor.
func (ar *ActorRole) SetEnvironment(env *EnvironmentActor) {
	ar.rc.mutex.Lock()
	ar.rc.env = env
	ar.rc.mutex.Unlock()
}

// Provider management

// SetLLM replaces the role's LLM provider.
// NOTE(review): this write is not guarded by any mutex, while Think
// reads ar.llm concurrently from the actor goroutine — confirm setters
// are only called before the actor is spawned, or add locking.
func (ar *ActorRole) SetLLM(llm *providers.MockLLM) {
	ar.llm = llm
}

// SetKnowledgeBase replaces the role's RAG knowledge engine.
// NOTE(review): unsynchronized like SetLLM; verify call ordering.
func (ar *ActorRole) SetKnowledgeBase(kb *knowledge.RAGEngine) {
	ar.knowledge = kb
}

// Lifecycle hooks

// OnStarted marks the role idle and logs that the actor is running.
func (ar *ActorRole) OnStarted() {
	ar.SetState(RoleStateIdle)
	ar.logger.
		WithField("role", ar.Role).
		WithField("name", ar.Name).
		Info("Actor role started")
}

// OnStopping marks the role as waiting and logs that the actor is
// shutting down.
func (ar *ActorRole) OnStopping() {
	ar.SetState(RoleStateWaiting)
	ar.logger.
		WithField("role", ar.Role).
		WithField("name", ar.Name).
		Info("Actor role stopping")
}

// OnStopped marks the role as finished and logs that the actor has
// fully stopped.
func (ar *ActorRole) OnStopped() {
	ar.SetState(RoleStateFinished)
	ar.logger.
		WithField("role", ar.Role).
		WithField("name", ar.Name).
		Info("Actor role stopped")
}

// OnRestarting resets the role to idle and logs that the supervisor is
// restarting the actor.
func (ar *ActorRole) OnRestarting() {
	ar.SetState(RoleStateIdle)
	ar.logger.
		WithField("role", ar.Role).
		WithField("name", ar.Name).
		Info("Actor role restarting")
}
