package processor

import (
	"fmt"
	"runtime"
	"sync"
	"time"
	"vidproc/internal/config"
)

// WorkerPool 工作池接口
// WorkerPool is the common interface implemented by every pool variant
// (static, dynamic, auto). Typical lifecycle: Start, Submit tasks,
// then Stop exactly once.
type WorkerPool interface {
	// Start launches the pool's worker goroutines.
	Start() error
	// Submit enqueues a task for execution; it blocks while the queue
	// is full and returns an error once the pool has been stopped.
	Submit(task func() error) error
	// Stop signals shutdown and waits for the workers to exit.
	Stop()
	// GetWorkerCount reports the current number of workers.
	GetWorkerCount() int
}

// Task 表示一个视频处理任务
// Task represents a single unit of work (one video processing job)
// executed by a pool worker.
type Task struct {
	// Process performs the work and reports any failure.
	Process func() error
}

// BaseWorkerPool 基础工作池实现
// BaseWorkerPool is a fixed-size worker pool: Start launches
// maxWorkers goroutines that drain a buffered task queue until Stop
// closes stopChan.
type BaseWorkerPool struct {
	maxWorkers  int           // number of worker goroutines launched by Start
	taskQueue   chan Task     // buffered queue of pending tasks
	workerCount int           // current worker count, guarded by mu
	stopChan    chan struct{} // closed by Stop to signal shutdown
	wg          sync.WaitGroup
	mu          sync.RWMutex // guards workerCount
}

// DynamicWorkerPool 动态工作池
// DynamicWorkerPool extends BaseWorkerPool with a background monitor
// that periodically re-evaluates the worker count (the adjustment
// logic itself is still a TODO in adjustWorkers).
type DynamicWorkerPool struct {
	*BaseWorkerPool
	config         *config.VideoConfig
	checkInterval  time.Duration // how often the resource monitor ticks
	adjustWorkerCh chan struct{} // NOTE(review): created but never read or written in this file — confirm it is still needed
}

// AutoWorkerPool 自动工作池
// AutoWorkerPool is a fixed-size pool whose worker count is derived
// automatically from the machine's CPU core count at construction.
type AutoWorkerPool struct {
	*BaseWorkerPool
	config *config.VideoConfig
}

// NewWorkerPool 创建工作池
// NewWorkerPool builds the worker pool matching the concurrency mode
// configured in cfg. An unrecognized mode yields an error.
func NewWorkerPool(cfg *config.VideoConfig) (WorkerPool, error) {
	cs := cfg.ConcurrencySettings
	switch cs.Mode {
	case config.ModeStatic:
		return newBaseWorkerPool(cs.MaxWorkers, cs.QueueSize)
	case config.ModeDynamic:
		return newDynamicWorkerPool(cfg)
	case config.ModeAuto:
		return newAutoWorkerPool(cfg)
	default:
		return nil, fmt.Errorf("unknown concurrency mode: %s", cs.Mode)
	}
}

// newBaseWorkerPool constructs a fixed-size pool with maxWorkers
// workers and a task queue buffered to queueSize entries.
// maxWorkers must be positive and queueSize non-negative.
func newBaseWorkerPool(maxWorkers, queueSize int) (*BaseWorkerPool, error) {
	if maxWorkers <= 0 {
		return nil, fmt.Errorf("maxWorkers must be greater than 0")
	}
	// A negative buffer size would make make(chan Task, queueSize)
	// panic at runtime; reject bogus config here instead.
	if queueSize < 0 {
		return nil, fmt.Errorf("queueSize must not be negative")
	}

	return &BaseWorkerPool{
		maxWorkers:  maxWorkers,
		taskQueue:   make(chan Task, queueSize),
		stopChan:    make(chan struct{}),
		workerCount: maxWorkers,
	}, nil
}

// newDynamicWorkerPool constructs a DynamicWorkerPool from cfg.
// The monitor interval is validated here because time.NewTicker
// panics on a non-positive duration — failing fast at construction is
// far better than panicking later inside Start's monitor goroutine.
func newDynamicWorkerPool(cfg *config.VideoConfig) (*DynamicWorkerPool, error) {
	base, err := newBaseWorkerPool(
		cfg.ConcurrencySettings.MaxWorkers,
		cfg.ConcurrencySettings.QueueSize,
	)
	if err != nil {
		return nil, err
	}

	// CheckInterval is configured in seconds.
	interval := time.Duration(cfg.ConcurrencySettings.Dynamic.CheckInterval) * time.Second
	if interval <= 0 {
		return nil, fmt.Errorf("dynamic check interval must be greater than 0, got %v", interval)
	}

	return &DynamicWorkerPool{
		BaseWorkerPool: base,
		config:         cfg,
		checkInterval:  interval,
		adjustWorkerCh: make(chan struct{}),
	}, nil
}

// newAutoWorkerPool constructs a pool whose size is derived from the
// machine's CPU count: twice the core count by default, or scaled by
// the configured maximum CPU percentage. The result is clamped to at
// least one worker so a small percentage on a low-core machine cannot
// produce a zero-sized (and therefore rejected) pool.
func newAutoWorkerPool(cfg *config.VideoConfig) (*AutoWorkerPool, error) {
	cpuCores := runtime.NumCPU()
	maxWorkers := cpuCores * 2 // default: twice the CPU core count

	// Scale down according to the configured CPU usage limit, when set.
	if pct := cfg.ConcurrencySettings.Auto.MaxCPUPercent; pct > 0 {
		maxWorkers = cpuCores * pct / 100
	}
	// Integer division can round to 0 (e.g. 4 cores at 20% -> 0), which
	// newBaseWorkerPool rejects; always keep at least one worker.
	if maxWorkers < 1 {
		maxWorkers = 1
	}

	base, err := newBaseWorkerPool(maxWorkers, cfg.ConcurrencySettings.QueueSize)
	if err != nil {
		return nil, err
	}

	return &AutoWorkerPool{
		BaseWorkerPool: base,
		config:         cfg,
	}, nil
}

// Start 启动工作池
// Start launches the pool's worker goroutines. It never fails; the
// error return exists only to satisfy the WorkerPool interface.
func (p *BaseWorkerPool) Start() error {
	// Register all workers up front, then spawn them.
	p.wg.Add(p.maxWorkers)
	for n := p.maxWorkers; n > 0; n-- {
		go p.worker()
	}
	return nil
}

// Submit 提交任务
// Submit enqueues task for execution by a worker. It blocks while the
// queue is full and returns an error if task is nil or the pool has
// been stopped. A task accepted shortly before Stop may still go
// unexecuted, since workers exit as soon as they see the stop signal.
func (p *BaseWorkerPool) Submit(task func() error) error {
	// A nil task would panic inside the worker goroutine; reject it here
	// where the caller can handle the error.
	if task == nil {
		return fmt.Errorf("task must not be nil")
	}

	select {
	case p.taskQueue <- Task{Process: task}:
		return nil
	case <-p.stopChan:
		return fmt.Errorf("worker pool is stopped")
	}
}

// Stop 停止工作池
// Stop signals all workers to exit and blocks until they have done so.
//
// NOTE(review): Stop must be called at most once — a second call
// panics on closing an already-closed channel (guard with sync.Once if
// multiple callers are possible). Tasks still queued when Stop runs
// may never execute, because workers return on the stop signal without
// draining the queue.
func (p *BaseWorkerPool) Stop() {
	close(p.stopChan)
	p.wg.Wait()
}

// GetWorkerCount 获取当前工作协程数
// GetWorkerCount returns the current worker count, taking a read lock
// so it is safe to call concurrently with any future adjustments.
func (p *BaseWorkerPool) GetWorkerCount() int {
	p.mu.RLock()
	count := p.workerCount
	p.mu.RUnlock()
	return count
}

// worker is the main loop of a single pool goroutine: it executes
// queued tasks until the stop signal arrives.
func (p *BaseWorkerPool) worker() {
	defer p.wg.Done()

	for {
		select {
		case <-p.stopChan:
			return
		case t := <-p.taskQueue:
			// TODO: proper error handling/reporting
			if err := t.Process(); err != nil {
				fmt.Printf("Error processing task: %v\n", err)
			}
		}
	}
}

// Start 启动动态工作池
// Start launches the base pool's workers plus a background goroutine
// that periodically re-evaluates the worker count. The monitor is
// registered with the pool's WaitGroup so that Stop waits for it to
// exit instead of returning while it is still running.
func (p *DynamicWorkerPool) Start() error {
	if err := p.BaseWorkerPool.Start(); err != nil {
		return err
	}

	// Track the monitor goroutine's lifetime alongside the workers'.
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		p.monitorResources()
	}()
	return nil
}

// monitorResources periodically triggers a worker-count adjustment
// until the pool is stopped.
func (p *DynamicWorkerPool) monitorResources() {
	tick := time.NewTicker(p.checkInterval)
	defer tick.Stop()

	for {
		select {
		case <-p.stopChan:
			return
		case <-tick.C:
			p.adjustWorkers()
		}
	}
}

// adjustWorkers re-evaluates the worker count based on system load.
// Currently a stub invoked on every monitor tick.
func (p *DynamicWorkerPool) adjustWorkers() {
	// TODO: implement resource monitoring and dynamic adjustment:
	// 1. sample current system resource usage
	// 2. scale the worker count against the configured thresholds
	// 3. clamp the result between the configured minimum and maximum
}
