package workerpool

import (

	"github.com/gammazero/deque" // third-party double-ended queue backing the waiting-task buffer
	"sync"
	"sync/atomic"
	"time"
)

const (
	// idleTimeoutSec is the dispatcher's idle interval in seconds (scaled by
	// time.Second in New).  If the worker pool receives no new work for this
	// period of time, one ready worker goroutine is stopped per interval
	// until no idle workers remain (see dispatch).
	idleTimeoutSec = 5
)

// New creates and starts a pool of worker goroutines.
//
// The maxWorkers parameter specifies the maximum number of workers that will
// execute tasks concurrently.  After each idle timeout period elapses with no
// new work, one worker goroutine is stopped, until none remain.
func New(maxWorkers int) *WorkerPool {
	// Guarantee a minimum of one worker.
	workers := maxWorkers
	if workers < 1 {
		workers = 1
	}

	p := &WorkerPool{
		maxWorkers:  workers,
		timeout:     idleTimeoutSec * time.Second,
		taskQueue:   make(chan func(), 1),
		workerQueue: make(chan func()),
		stoppedChan: make(chan struct{}),
	}

	// The dispatcher goroutine owns the worker lifecycle; it runs until
	// Stop or StopWait closes the task queue.
	go p.dispatch()

	return p
}

// WorkerPool is a collection of goroutines, where the number of concurrent
// goroutines processing requests does not exceed the specified maximum.
type WorkerPool struct {
	maxWorkers   int           // maximum number of concurrent workers; set once in New
	timeout      time.Duration // idle period after which one ready worker is stopped
	taskQueue    chan func()   // receives tasks from Submit/SubmitWait; closed by stop
	workerQueue  chan func()   // hands tasks to workers; a nil task tells a worker to exit
	stoppedChan  chan struct{} // closed by dispatch when it has fully shut down
	waitingQueue deque.Deque   // overflow task queue; touched only by the dispatch goroutine
	stopOnce     sync.Once     // ensures the stop sequence runs at most once
	stopped      int32         // nonzero once Stop/StopWait has been called (read/written atomically)
	waiting      int32         // atomic snapshot of waitingQueue length, for WaitingQueueSize
	wait         bool          // if true, stop drains waitingQueue before shutting workers down
}

// Size reports the maximum number of workers that may execute tasks
// concurrently in this pool.
func (p *WorkerPool) Size() int {
	return p.maxWorkers
}

// Stop shuts down the worker pool, waiting only for tasks that are already
// running to finish.  Pending tasks that have not yet started are discarded.
// No task may be submitted once Stop has been called.
//
// Because New starts at least one goroutine (the dispatcher), either Stop or
// StopWait must be called when the pool is no longer needed, or that
// goroutine will leak.
func (p *WorkerPool) Stop() {
	p.stop(false)
}

// StopWait shuts down the worker pool, but first runs every queued task to
// completion.  No additional tasks may be submitted; the call returns only
// after all pending work has been executed by workers.
func (p *WorkerPool) StopWait() {
	p.stop(true)
}

// Stopped reports whether Stop or StopWait has been called on this pool.
func (p *WorkerPool) Stopped() bool {
	flag := atomic.LoadInt32(&p.stopped)
	return flag != 0
}

// Submit enqueues a function for a worker to execute.  A nil task is ignored.
//
// Any external values needed by the task must be captured in a closure, and
// any results returned over a channel captured the same way.
//
// Submit never blocks, no matter how many tasks are outstanding: each task is
// handed straight to an available worker, or triggers the dispatcher to start
// a new worker while fewer than the maximum are running.  Once the maximum is
// reached and all workers are busy, tasks accumulate in a waiting queue and
// run as workers free up.
//
// When no work arrives for a full timeout period, one idle worker is shut
// down; this repeats each period until no idle workers remain.  Goroutine
// startup is cheap, so keeping idle workers around buys nothing.
func (p *WorkerPool) Submit(task func()) {
	if task == nil {
		return
	}
	p.taskQueue <- task
}

// SubmitWait enqueues the given function and blocks until a worker has
// finished executing it.  A nil task is ignored.
func (p *WorkerPool) SubmitWait(task func()) {
	if task == nil {
		return
	}
	done := make(chan struct{})
	// Wrap the task so completion is signaled back to this goroutine.
	p.taskQueue <- func() {
		task()
		close(done)
	}
	<-done
}

// WaitingQueueSize returns the number of tasks currently held in the waiting
// queue (tasks submitted while every worker was busy).
func (p *WorkerPool) WaitingQueueSize() int {
	n := atomic.LoadInt32(&p.waiting)
	return int(n)
}

// dispatch sends the next queued task to an available worker.
//
// It runs as a single goroutine (started by New) and is the only goroutine
// that touches waitingQueue, so the deque needs no locking.  It exits when
// stop closes taskQueue, and signals full shutdown by closing stoppedChan.
func (p *WorkerPool) dispatch() {
	defer close(p.stoppedChan)
	timeout := time.NewTimer(p.timeout)
	defer timeout.Stop()
	var (
		workerCount int
		task        func()
		ok          bool
	)
Loop: // labeled so that "break Loop" exits the for loop from inside a select
	for {
		// As long as tasks are in the waiting queue, remove and execute these
		// tasks as workers become available, and place new incoming tasks on
		// the queue.  Once the queue is empty, then go back to submitting
		// incoming tasks directly to available workers.
		if p.waitingQueue.Len() != 0 {
			select {
			case task, ok = <-p.taskQueue:
				if !ok {
					break Loop
				}
				p.waitingQueue.PushBack(task)
			case p.workerQueue <- p.waitingQueue.Front().(func()):
				// A worker was ready, so gave task to worker.
				p.waitingQueue.PopFront()
			}
			atomic.StoreInt32(&p.waiting, int32(p.waitingQueue.Len()))
			continue
		}
		// Restart the idle timer.  Per the time.Timer.Reset documentation the
		// timer must be stopped and its channel drained before Reset is
		// called; otherwise an expiration left over from a previous iteration
		// (the timer firing concurrently with a task arrival) would sit in
		// timeout.C and make the select below kill a ready worker
		// immediately, even though work just arrived.
		if !timeout.Stop() {
			select {
			case <-timeout.C:
			default:
			}
		}
		timeout.Reset(p.timeout)
		select {
		case task, ok = <-p.taskQueue:
			if !ok {
				break Loop
			}
			// Got a task to do.
			select {
			case p.workerQueue <- task:
			default:
				// No workers ready.
				// Create a new worker, if not at max.
				if workerCount < p.maxWorkers {
					workerCount++
					go func(t func()) {
						// Run initial task and start worker waiting for more.
						t()
						go startWorker(p.workerQueue)
					}(task)
				} else {
					// Enqueue task to be executed by next available worker.
					p.waitingQueue.PushBack(task)
					atomic.StoreInt32(&p.waiting, int32(p.waitingQueue.Len()))
				}
			}
		case <-timeout.C:
			// Timed out waiting for work to arrive.  Kill a ready worker.
			if workerCount > 0 {
				select {
				case p.workerQueue <- nil:
					// Send kill signal to worker.
					workerCount--
				default:
					// No work, but no ready workers.  All workers are busy.
				}
			}
		}
	}

	// If instructed to wait for all queued tasks, then remove from queue and
	// give to workers until queue is empty.
	if p.wait {
		for p.waitingQueue.Len() != 0 {
			// A worker is ready, so give task to worker.
			p.workerQueue <- p.waitingQueue.PopFront().(func())
			atomic.StoreInt32(&p.waiting, int32(p.waitingQueue.Len()))
		}
	}

	// Stop all remaining workers as they become ready.
	for workerCount > 0 {
		p.workerQueue <- nil
		workerCount--
	}
}

// startWorker runs the body of a worker goroutine: it executes tasks received
// from the dispatcher until told to quit.
//
// The dispatcher signals shutdown by sending a nil task on workerQueue;
// closing the channel ends the worker as well.
func startWorker(workerQueue chan func()) {
	for {
		task, ok := <-workerQueue
		if !ok || task == nil {
			return
		}
		task()
	}
}

// stop tells the dispatcher to exit, and whether or not to complete queued
// tasks.  It is idempotent: only the first call (via stopOnce) performs the
// shutdown; later calls return after it completes.
//
// The statement order matters: stopped and wait are written before taskQueue
// is closed, and dispatch only reads p.wait after observing the close, so the
// channel close provides the necessary happens-before ordering.
func (p *WorkerPool) stop(wait bool) {
	p.stopOnce.Do(func() {
		atomic.StoreInt32(&p.stopped, 1)
		p.wait = wait
		// Close task queue and wait for currently running tasks to finish.
		close(p.taskQueue)
		// stoppedChan is closed by dispatch once all workers have exited.
		<-p.stoppedChan
	})
}
