package mutli_cache

// Thanks to https://github.com/ivpusic/grpool
import (
	"runtime"
	"sync"
	"time"
)

// worker is a goroutine instance which can accept client jobs.
type worker struct {
	id   int64       // worker id, used only in log messages
	pool *WorkerPool // back-reference to the owning pool
}

// start launches the worker goroutine. The goroutine runs jobs from the
// pool's jobQueue until the pool is stopped, then drains any jobs that
// are still buffered and exits, signalling completion via pool.wg.
func (w *worker) start() {
	go func() {
		if CacheDebug {
			LogDebugF("WorkerPool [%d] worker start\n", w.id)
		}
		defer func() {
			w.pool.wg.Done()
		}()
		for {
			select {
			case <-w.pool.stopped:
				// Pool is shutting down: stop accepting new work.
				if CacheDebug {
					LogDebugF("WorkerPool [%d] worker <-isStop\n", w.id)
				}
				// Run the jobs that were already queued, then exit.
				// This must be a non-blocking drain: release() closes
				// jobQueue only after wg.Wait() returns, so ranging over
				// the channel here would block every worker until the
				// 5-second force timeout fired.
				w.drain()
				if CacheDebug {
					LogDebugF("WorkerPool [%d] worker exit\n", w.id)
				}
				return
			case job, ok := <-w.pool.jobQueue:
				if !ok {
					// jobQueue was closed: nothing more will arrive.
					return
				}
				runJob(w.id, job)
			}
		}
	}()
}

// drain runs every job currently buffered in the queue without blocking,
// returning as soon as the queue is empty or closed.
func (w *worker) drain() {
	for {
		select {
		case job, ok := <-w.pool.jobQueue:
			if !ok {
				return
			}
			runJob(w.id, job)
		default:
			return
		}
	}
}

// runJob executes the given job, recovering from any panic it raises and
// (when debug logging is enabled) logging the panic value and stack trace.
func runJob(id int64, f func()) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if CacheDebug {
			LogErrF("WorkerPool [%d] Job panic err: %v, stack: %v\n", id, r, string(outputStackErr()))
		}
	}()
	f()
}

// outputStackErr returns the current goroutine's stack trace, truncated
// to 4 KiB.
func outputStackErr() []byte {
	var buf [4096]byte
	n := runtime.Stack(buf[:], false)
	return buf[:n:n]
}

// newWorker builds a worker bound to pool and immediately starts its
// goroutine.
func newWorker(id int64, pool *WorkerPool) *worker {
	w := &worker{id: id, pool: pool}
	w.start()
	return w
}

// Job is a unit of work submitted to the pool.
type Job func()

// WorkerPool runs submitted jobs on a fixed set of worker goroutines.
type WorkerPool struct {
	jobQueue chan Job       // job channel; buffered jobs are picked up and run by workers
	workers  []*worker      // the worker goroutines
	stopOne  sync.Once      // ensures release runs at most once
	stopped  chan struct{}  // closed to signal pool shutdown
	wg       sync.WaitGroup // counts live goroutines started by the pool
}

// NewPool creates a worker pool.
// numWorkers is the number of worker goroutines to start.
// jobQueueLen is how many jobs may be buffered while all workers are busy.
func NewPool(numWorkers int, jobQueueLen int) *WorkerPool {
	pool := &WorkerPool{
		jobQueue: make(chan Job, jobQueueLen),
		workers:  make([]*worker, numWorkers),
		stopped:  make(chan struct{}),
	}
	// wg tracks every goroutine the pool starts; shutdown is only
	// complete once it drops back to zero.
	for i := range pool.workers {
		pool.wg.Add(1)
		pool.workers[i] = newWorker(int64(i), pool)
	}
	// Optionally start the queue-length monitor goroutine.
	if CacheMonitor {
		pool.wg.Add(1)
		go pool.monitor()
	}
	return pool
}

// wrapJob is a hook point for decorating submitted jobs; it currently
// returns the job unchanged.
func (p *WorkerPool) wrapJob(job func()) func() {
	wrapped := job
	return wrapped
}

// SendJobWithTimeout enqueues job, waiting at most t for queue space.
// It returns false if the pool has been stopped or the timeout elapses
// before the job is accepted, and true once the job is queued.
func (p *WorkerPool) SendJobWithTimeout(job func(), t time.Duration) bool {
	// Use an explicit timer instead of time.After so the timer is
	// released as soon as the send succeeds, rather than lingering
	// until the full duration elapses.
	timer := time.NewTimer(t)
	defer timer.Stop()
	select {
	case <-p.stopped:
		return false
	case <-timer.C:
		return false
	case p.jobQueue <- p.wrapJob(job):
		return true
	}
}

// SendJobWithDeadline enqueues job before the deadline t. It returns
// false if the pool has been stopped, the deadline has already passed,
// or no queue space opens up in time; it returns true once the job is
// queued.
func (p *WorkerPool) SendJobWithDeadline(job func(), t time.Time) bool {
	d := time.Until(t) // idiomatic form of t.Sub(time.Now())
	if d <= 0 {
		// Deadline already expired: fail fast. (The original granted a
		// one-second grace period here, contradicting the documented
		// "complete before the deadline" contract.)
		return false
	}
	// Explicit timer so it is stopped promptly once the send succeeds.
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case <-p.stopped:
		return false
	case <-timer.C:
		return false
	case p.jobQueue <- p.wrapJob(job):
		return true
	}
}

// SendJob submits job to the pool, blocking until the queue accepts it.
// If the pool has been stopped the job is silently dropped.
//
// NOTE(review): a SendJob racing with Release can attempt a send on the
// closed jobQueue and panic — confirm callers stop submitting before
// calling Release.
func (p *WorkerPool) SendJob(job func()) {
	wrapped := p.wrapJob(job)
	select {
	case <-p.stopped:
	case p.jobQueue <- wrapped:
	}
}

// monitor periodically logs the current length of the job queue until
// the pool is stopped. It logs every CacheMonitorSecond seconds.
func (p *WorkerPool) monitor() {
	// NewPool does wg.Add(1) before starting the monitor, so Done must
	// be called here. The original never did, which kept wg from ever
	// reaching zero and forced release() onto its 5-second force
	// timeout whenever the monitor was enabled.
	defer p.wg.Done()
	t := time.NewTicker(time.Duration(CacheMonitorSecond) * time.Second)
	defer t.Stop()
	for {
		select {
		case <-p.stopped:
			return
		case <-t.C:
			LogDebug("WorkerPool jobQueue current len ", len(p.jobQueue))
		}
	}
}

// release shuts the pool down: it signals stop, waits up to 5 seconds
// for all goroutines counted in wg to finish, then closes the job queue.
func (p *WorkerPool) release() {
	close(p.stopped)
	force := make(chan struct{})
	forceOne := sync.Once{}
	// Signal force as soon as every goroutine counted in wg has exited.
	// (The original wrapped this in a for/select with a default branch,
	// but both branches returned on the first iteration, so the loop
	// was dead scaffolding.)
	go func() {
		p.wg.Wait()
		forceOne.Do(func() {
			close(force)
		})
	}()
	// Hard deadline: stop waiting after 5 seconds even if some
	// goroutines have not finished.
	time.AfterFunc(5*time.Second, func() {
		forceOne.Do(func() {
			close(force)
		})
	})
	<-force
	// NOTE(review): closing jobQueue can race with concurrent SendJob
	// callers (send on a closed channel panics) — confirm no submissions
	// happen after Release.
	close(p.jobQueue)
}

// Release shuts down the pool. It is safe to call from multiple
// goroutines; only the first call performs the shutdown.
func (p *WorkerPool) Release() {
	p.stopOne.Do(func() {
		p.release()
	})
}
