package algorithms

import (
	"context"
	"runtime"
	"strconv"
	"sync"
	"time"
)

// WorkerPool 高效的工作池实现
type WorkerPool struct {
	workerCount int
	jobQueue    chan Job
	resultQueue chan Result
	wg          sync.WaitGroup
	ctx         context.Context
	cancel      context.CancelFunc
	started     bool
	mu          sync.RWMutex
}

// Job 工作任务接口
type Job interface {
	Execute() (interface{}, error)
	ID() string
}

// Result 任务结果
type Result struct {
	JobID  string
	Data   interface{}
	Error  error
	Timing time.Duration
}

// NewWorkerPool 创建新的工作池
func NewWorkerPool(workerCount int, jobBufferSize int) *WorkerPool {
	if workerCount <= 0 {
		workerCount = runtime.NumCPU()
	}
	
	ctx, cancel := context.WithCancel(context.Background())
	
	return &WorkerPool{
		workerCount: workerCount,
		jobQueue:    make(chan Job, jobBufferSize),
		resultQueue: make(chan Result, jobBufferSize),
		ctx:         ctx,
		cancel:      cancel,
	}
}

// Start 启动工作池
func (wp *WorkerPool) Start() {
	wp.mu.Lock()
	defer wp.mu.Unlock()
	
	if wp.started {
		return
	}
	
	for i := 0; i < wp.workerCount; i++ {
		wp.wg.Add(1)
		go wp.worker(i)
	}
	
	wp.started = true
}

// worker 工作协程
func (wp *WorkerPool) worker(id int) {
	defer wp.wg.Done()
	
	for {
		select {
		case job, ok := <-wp.jobQueue:
			if !ok {
				return
			}
			
			start := time.Now()
			data, err := job.Execute()
			duration := time.Since(start)
			
			result := Result{
				JobID:  job.ID(),
				Data:   data,
				Error:  err,
				Timing: duration,
			}
			
			select {
			case wp.resultQueue <- result:
			case <-wp.ctx.Done():
				return
			}
			
		case <-wp.ctx.Done():
			return
		}
	}
}

// Submit 提交任务
func (wp *WorkerPool) Submit(job Job) bool {
	select {
	case wp.jobQueue <- job:
		return true
	case <-wp.ctx.Done():
		return false
	default:
		return false
	}
}

// Results 获取结果通道
func (wp *WorkerPool) Results() <-chan Result {
	return wp.resultQueue
}

// Stop 停止工作池
func (wp *WorkerPool) Stop() {
	wp.mu.Lock()
	defer wp.mu.Unlock()
	
	if !wp.started {
		return
	}
	
	close(wp.jobQueue)
	wp.wg.Wait()
	wp.cancel()
	close(wp.resultQueue)
	wp.started = false
}

// BatchProcessor 批处理器，优化大量数据处理
type BatchProcessor struct {
	batchSize   int
	processFunc func([]interface{}) ([]interface{}, error)
	workerPool  *WorkerPool
}

// NewBatchProcessor 创建批处理器
func NewBatchProcessor(batchSize int, processFunc func([]interface{}) ([]interface{}, error)) *BatchProcessor {
	return &BatchProcessor{
		batchSize:   batchSize,
		processFunc: processFunc,
		workerPool:  NewWorkerPool(runtime.NumCPU(), 100),
	}
}

// Process 批量处理数据
func (bp *BatchProcessor) Process(data []interface{}) ([]interface{}, error) {
	if len(data) == 0 {
		return nil, nil
	}
	
	bp.workerPool.Start()
	defer bp.workerPool.Stop()
	
	// 分批处理
	batches := bp.createBatches(data)
	var results []interface{}
	var mu sync.Mutex
	var wg sync.WaitGroup
	errorChan := make(chan error, len(batches))
	
	for _, batch := range batches {
		wg.Add(1)
		go func(b []interface{}) {
			defer wg.Done()
			
			batchResult, err := bp.processFunc(b)
			if err != nil {
				errorChan <- err
				return
			}
			
			mu.Lock()
			results = append(results, batchResult...)
			mu.Unlock()
		}(batch)
	}
	
	wg.Wait()
	close(errorChan)
	
	// 检查错误
	for err := range errorChan {
		if err != nil {
			return nil, err
		}
	}
	
	return results, nil
}

// createBatches splits data into consecutive sub-slices of at most
// batchSize elements. The sub-slices alias data's backing array (no
// copies are made).
func (bp *BatchProcessor) createBatches(data []interface{}) [][]interface{} {
	size := bp.batchSize
	if size <= 0 {
		// Defensive: a zero step would loop forever below and a
		// negative one would produce invalid bounds.
		size = 1
	}

	batches := make([][]interface{}, 0, (len(data)+size-1)/size)
	for start := 0; start < len(data); start += size {
		end := start + size
		if end > len(data) {
			end = len(data)
		}
		batches = append(batches, data[start:end])
	}
	return batches
}

// AsyncProcessor 异步处理器
type AsyncProcessor struct {
	processFunc func(interface{}) (interface{}, error)
	workerPool  *WorkerPool
	timeout     time.Duration
}

// NewAsyncProcessor 创建异步处理器
func NewAsyncProcessor(processFunc func(interface{}) (interface{}, error), timeout time.Duration) *AsyncProcessor {
	return &AsyncProcessor{
		processFunc: processFunc,
		workerPool:  NewWorkerPool(runtime.NumCPU()*2, 1000),
		timeout:     timeout,
	}
}

// ProcessAsync 异步处理单个任务
func (ap *AsyncProcessor) ProcessAsync(data interface{}) <-chan Result {
	resultChan := make(chan Result, 1)
	
	go func() {
		defer close(resultChan)
		
		ctx, cancel := context.WithTimeout(context.Background(), ap.timeout)
		defer cancel()
		
		done := make(chan Result, 1)
		
		go func() {
			start := time.Now()
			result, err := ap.processFunc(data)
			duration := time.Since(start)
			
			done <- Result{
				JobID:  "async",
				Data:   result,
				Error:  err,
				Timing: duration,
			}
		}()
		
		select {
		case result := <-done:
			resultChan <- result
		case <-ctx.Done():
			resultChan <- Result{
				JobID:  "async",
				Error:  ctx.Err(),
				Timing: ap.timeout,
			}
		}
	}()
	
	return resultChan
}

// ProcessBatch 异步批量处理
func (ap *AsyncProcessor) ProcessBatch(data []interface{}) <-chan []Result {
	resultChan := make(chan []Result, 1)
	
	go func() {
		defer close(resultChan)
		
		ap.workerPool.Start()
		defer ap.workerPool.Stop()
		
		var results []Result
		var mu sync.Mutex
		var wg sync.WaitGroup
		
		for i, item := range data {
			wg.Add(1)
			go func(index int, d interface{}) {
				defer wg.Done()
				
				start := time.Now()
				result, err := ap.processFunc(d)
				duration := time.Since(start)
				
				mu.Lock()
				results = append(results, Result{
					JobID:  string(rune(index)),
					Data:   result,
					Error:  err,
					Timing: duration,
				})
				mu.Unlock()
			}(i, item)
		}
		
		wg.Wait()
		resultChan <- results
	}()
	
	return resultChan
}

// MemoryPool 内存池，减少GC压力
type MemoryPool struct {
	pool sync.Pool
	size int
}

// NewMemoryPool 创建内存池
func NewMemoryPool(size int) *MemoryPool {
	return &MemoryPool{
		pool: sync.Pool{
			New: func() interface{} {
				return make([]byte, size)
			},
		},
		size: size,
	}
}

// Get 获取缓冲区
func (mp *MemoryPool) Get() []byte {
	return mp.pool.Get().([]byte)
}

// Put 归还缓冲区
func (mp *MemoryPool) Put(buf []byte) {
	if len(buf) == mp.size {
		mp.pool.Put(buf[:0]) // 重置长度但保留容量
	}
}

// PipelineProcessor runs each input value through an ordered chain of
// processing stages.
type PipelineProcessor struct {
	stages []func(interface{}) (interface{}, error) // applied in order by Process
	buffer int                                      // capacity of the output channel created by Process
}

// NewPipelineProcessor 创建流水线处理器
func NewPipelineProcessor(buffer int) *PipelineProcessor {
	return &PipelineProcessor{
		buffer: buffer,
	}
}

// AddStage 添加处理阶段
func (pp *PipelineProcessor) AddStage(stage func(interface{}) (interface{}, error)) {
	pp.stages = append(pp.stages, stage)
}

// Process 流水线处理
func (pp *PipelineProcessor) Process(input <-chan interface{}) <-chan Result {
	output := make(chan Result, pp.buffer)
	
	go func() {
		defer close(output)
		
		for data := range input {
			start := time.Now()
			current := data
			var err error
			
			// 依次通过所有阶段
			for i, stage := range pp.stages {
				current, err = stage(current)
				if err != nil {
					output <- Result{
						JobID:  "pipeline",
						Error:  err,
						Timing: time.Since(start),
					}
					break
				}
				
				// 如果是最后一个阶段，输出结果
				if i == len(pp.stages)-1 {
					output <- Result{
						JobID:  "pipeline",
						Data:   current,
						Timing: time.Since(start),
					}
				}
			}
		}
	}()
	
	return output
}