package worker

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"

	"gitee.com/yuebanlaosiji/gocelery/pkg/backend"
	"gitee.com/yuebanlaosiji/gocelery/pkg/broker"
	"gitee.com/yuebanlaosiji/gocelery/pkg/protocol"
	"gitee.com/yuebanlaosiji/gocelery/pkg/registry"
	"gitee.com/yuebanlaosiji/gocelery/pkg/serializer"
	"gitee.com/yuebanlaosiji/gocelery/pkg/types"
)

// Sentinel errors returned by the worker lifecycle methods. Callers should
// compare with errors.Is.
var (
	// ErrWorkerRunning is returned when attempting to start an already running worker
	ErrWorkerRunning = errors.New("worker is already running")

	// ErrWorkerNotRunning is returned when attempting to stop a worker that is not running
	ErrWorkerNotRunning = errors.New("worker is not running")

	// ErrMissingBroker is returned when broker is not provided
	ErrMissingBroker = errors.New("broker is required")

	// ErrMissingRegistry is returned when registry is not provided
	ErrMissingRegistry = errors.New("registry is required")
)

// WorkerImpl implements the Worker interface. It consumes task messages from
// a broker queue, dispatches them to registered handlers, and (optionally)
// stores results in a backend. A WorkerImpl must not be copied after first
// use because it embeds sync primitives.
type WorkerImpl struct {
	broker     broker.Broker         // message source (required)
	backend    backend.Backend       // result store; may be nil (results are then dropped)
	registry   registry.Registry     // task-name -> handler lookup (required)
	protocol   protocol.Protocol     // message encoding/decoding
	serializer serializer.Serializer // NOTE(review): held but not used in this file — presumably used elsewhere

	// Worker options (fixed at construction time)
	queueName   string // broker queue to consume from
	concurrency int    // number of consumer goroutines; always >= 1

	// Worker state.
	// running is guarded by mu; the two counters are accessed only via
	// sync/atomic so Status can read them without racing the workers.
	running        bool
	activeTasks    int32
	processedTasks int32

	// Channels for worker control.
	// quit is replaced on every Start so the worker can be restarted;
	// closing it broadcasts shutdown to all consumer goroutines.
	quit chan struct{}
	wg   sync.WaitGroup
	mu   sync.Mutex // guards running and the Start/Stop transitions
}

// WorkerOptions contains options for creating a worker.
// A nil *WorkerOptions passed to NewWorker selects the defaults
// (queue "celery", concurrency 1).
type WorkerOptions struct {
	QueueName   string // broker queue to consume from
	Concurrency int    // number of concurrent consumer goroutines; values < 1 are raised to 1
}

// NewWorker creates a new worker.
//
// brk and reg are required; ErrMissingBroker / ErrMissingRegistry are
// returned if either is nil. bck, proto and ser are stored as given (a nil
// backend means results are not persisted). options may be nil, in which
// case the defaults (queue "celery", concurrency 1) are used; a concurrency
// below 1 is raised to 1. The caller's options struct is never modified.
func NewWorker(
	brk broker.Broker,
	bck backend.Backend,
	reg registry.Registry,
	proto protocol.Protocol,
	ser serializer.Serializer,
	options *WorkerOptions,
) (*WorkerImpl, error) {
	if brk == nil {
		return nil, ErrMissingBroker
	}

	if reg == nil {
		return nil, ErrMissingRegistry
	}

	// Copy option values into locals instead of writing through the
	// caller's pointer: the previous implementation mutated
	// options.Concurrency, surprising callers that reuse the struct.
	queueName := "celery"
	concurrency := 1
	if options != nil {
		queueName = options.QueueName
		concurrency = options.Concurrency
	}

	// Ensure minimum concurrency
	if concurrency < 1 {
		concurrency = 1
	}

	worker := &WorkerImpl{
		broker:      brk,
		backend:     bck,
		registry:    reg,
		protocol:    proto,
		serializer:  ser,
		queueName:   queueName,
		concurrency: concurrency,
		quit:        make(chan struct{}),
	}

	return worker, nil
}

// Register registers a task handler under the given name by delegating to
// the underlying registry. The returned error is whatever the registry
// reports (e.g. for a duplicate name — exact semantics are the registry's).
func (w *WorkerImpl) Register(name string, handler types.TaskFunc) error {
	return w.registry.Register(name, handler)
}

// Start starts the worker: it subscribes to the configured queue and
// launches the configured number of consumer goroutines.
// It returns ErrWorkerRunning if the worker is already started, or a
// wrapped broker error if the subscription fails.
func (w *WorkerImpl) Start() error {
	w.mu.Lock()
	defer w.mu.Unlock()

	if w.running {
		return ErrWorkerRunning
	}

	// Replace the quit channel so a previously stopped worker can be
	// started again (the old channel was closed by Stop).
	w.quit = make(chan struct{})

	// Subscribe to the broker before declaring ourselves running.
	msgChan, err := w.broker.ConsumeMessages(w.queueName)
	if err != nil {
		return fmt.Errorf("failed to start consuming messages: %w", err)
	}

	// Spin up the consumer goroutines; register them with the wait group
	// in one call so Stop can wait for all of them.
	w.wg.Add(w.concurrency)
	for n := w.concurrency; n > 0; n-- {
		go w.worker(msgChan)
	}

	w.running = true
	return nil
}

// Stop stops the worker and blocks until every consumer goroutine has
// exited. It returns ErrWorkerNotRunning if the worker was not started.
func (w *WorkerImpl) Stop() error {
	w.mu.Lock()
	defer w.mu.Unlock()

	if !w.running {
		return ErrWorkerNotRunning
	}

	close(w.quit) // broadcast shutdown to every consumer goroutine
	w.wg.Wait()   // wait until all of them have observed it and returned

	w.running = false
	return nil
}

// Status returns a snapshot of the worker's state: whether it is running,
// how many tasks are currently executing, and how many have completed.
// The counters are read atomically, so the snapshot is safe to take while
// tasks are in flight.
func (w *WorkerImpl) Status() WorkerStatus {
	w.mu.Lock()
	running := w.running
	w.mu.Unlock()

	return WorkerStatus{
		Running:        running,
		ActiveTasks:    int(atomic.LoadInt32(&w.activeTasks)),
		TasksProcessed: int(atomic.LoadInt32(&w.processedTasks)),
	}
}

// worker is a single consumer goroutine: it pulls raw messages off the
// broker channel and dispatches them until shutdown is requested or the
// channel is closed by the broker.
func (w *WorkerImpl) worker(msgChan <-chan []byte) {
	defer w.wg.Done()

	for {
		select {
		case <-w.quit:
			// Stop() closed the quit channel — shut down.
			return
		case msg, open := <-msgChan:
			if !open {
				// Broker closed the message stream; nothing left to do.
				return
			}
			w.processMessage(msg)
		}
	}
}

// processMessage processes a single raw task message: it parses the message,
// looks up the handler, executes it, and stores the outcome in the backend
// (when one is configured). A panic inside the handler is recovered and
// recorded as a task failure instead of killing the worker goroutine —
// previously a panicking handler permanently lost one unit of concurrency.
func (w *WorkerImpl) processMessage(msg []byte) {
	// Track in-flight work for Status().
	atomic.AddInt32(&w.activeTasks, 1)
	defer atomic.AddInt32(&w.activeTasks, -1)

	// Parse the task
	task, err := w.protocol.ParseTaskMessage(msg)
	if err != nil {
		// Unparseable message: nothing to report a result against.
		// TODO(review): log this once the worker has a logger.
		return
	}

	// Get the task handler
	handler, err := w.registry.GetTask(task.Name)
	if err != nil {
		w.storeResult(types.TaskResult{
			TaskID: task.ID,
			Status: types.StatusFailure,
			Error:  fmt.Sprintf("task not found: %s", task.Name),
		})
		return
	}

	// Execute the task, converting a handler panic into an error so one
	// bad task cannot crash this goroutine.
	resultValue, err := func() (v interface{}, e error) {
		defer func() {
			if r := recover(); r != nil {
				e = fmt.Errorf("task panicked: %v", r)
			}
		}()
		return handler(task.Args, task.Kwargs)
	}()

	if err != nil {
		w.storeResult(types.TaskResult{
			TaskID: task.ID,
			Status: types.StatusFailure,
			Error:  err.Error(),
		})
	} else {
		w.storeResult(types.TaskResult{
			TaskID: task.ID,
			Status: types.StatusSuccess,
			Result: resultValue,
		})
	}

	// Increment processed tasks counter
	atomic.AddInt32(&w.processedTasks, 1)
}

// storeResult persists a task result to the backend, if one is configured.
// Serialization and storage failures are deliberately ignored (best-effort),
// matching the original behavior; a logger hook would be the right upgrade.
func (w *WorkerImpl) storeResult(result types.TaskResult) {
	if w.backend == nil {
		return
	}
	resultBytes, _ := w.protocol.CreateResultMessage(result)
	_ = w.backend.SetResult(result.TaskID, resultBytes)
}
