package worker_pool

import (
	"context"
	"github.com/panjf2000/ants/v2"
	"github.com/zeromicro/go-zero/core/logx"
	"payme3000/framework/logxx"
	frameworkRecover "payme3000/framework/recover"
	"payme3000/framework/util/math"
	"sync"
	"sync/atomic"
	"time"
)

type (
	// WorkerPool dispatches Jobs onto a fixed set of buffered channels, each
	// drained by WorkerNum worker goroutines. Jobs are routed to a channel by
	// a caller-supplied hash; when a channel is full, delivery is handed off
	// to a small backup goroutine pool.
	WorkerPool struct {
		config     *Config
		chanList   []*Chan
		waitingLen int32      // count of jobs parked in the backup pool, waiting for channel space
		backupPool *ants.Pool // backup goroutine pool; takes over delivery when a channel in chanList is full

		ctx    context.Context
		cancel context.CancelFunc
		wg     sync.WaitGroup // tracks worker goroutines so Stop can wait for them to finish
	}

	// Config controls pool sizing and shutdown behavior. Zero/invalid values
	// are clamped by NewWorkerPool.
	Config struct {
		Context    context.Context // parent context; canceling it also cancels the WorkerPool's context
		ChannelNum int8            // number of channels, at most 16
		WorkerNum  int8            // workers per channel, at most 8
		Capacity   int32           // max buffered Jobs per channel, at most 10240
		Discard    bool            // whether to drop still-unprocessed Jobs on shutdown
	}

	// Job is a unit of work. It receives a context detached from the
	// submitter's cancellation (see Submit's use of context.WithoutCancel).
	Job func(ctx context.Context)
	// JobWrap is a Job already bound to its context, ready to run.
	JobWrap func()
	// Chan pairs a job channel with a sync.Once that guards its close.
	Chan struct {
		c    chan JobWrap
		stop sync.Once // ensures c is closed at most once during drain-on-shutdown
	}
)

// NewWorkerPool builds a WorkerPool from the optional config. The first
// element is used; when none is given (or it is nil) NewDefaultConfig
// applies. ChannelNum, WorkerNum and Capacity are clamped to [1,16], [1,8]
// and (0,10240] respectively, with non-positive Capacity falling back to
// 8192. The supplied Config is copied before clamping so the caller's
// struct is never mutated. Start must be called before jobs are processed.
func NewWorkerPool(config ...*Config) *WorkerPool {
	var _config *Config
	if len(config) > 0 && config[0] != nil {
		// Copy so the clamping below does not mutate the caller's Config.
		cfg := *config[0]
		_config = &cfg
	} else {
		// default config
		_config = NewDefaultConfig()
	}

	// Clamp to the documented upper bounds, then repair non-positive values.
	_config.ChannelNum = math.Min(_config.ChannelNum, 16)
	_config.WorkerNum = math.Min(_config.WorkerNum, 8)
	_config.Capacity = math.Min(_config.Capacity, 10240)
	if _config.ChannelNum <= 0 {
		_config.ChannelNum = 1
	}
	if _config.WorkerNum <= 0 {
		_config.WorkerNum = 1
	}
	if _config.Capacity <= 0 {
		_config.Capacity = 8192
	}

	p := &WorkerPool{
		config: _config,
	}

	p.chanList = make([]*Chan, _config.ChannelNum)
	for i := int8(0); i < _config.ChannelNum; i++ {
		p.chanList[i] = &Chan{
			c: make(chan JobWrap, _config.Capacity),
		}
	}

	// Derive the pool context from the parent when one was provided.
	if _config.Context == nil {
		p.ctx, p.cancel = context.WithCancel(context.Background())
	} else {
		p.ctx, p.cancel = context.WithCancel(_config.Context)
	}

	return p
}

// NewDefaultConfig returns the fallback pool configuration: a single
// channel served by one worker, with room for 8192 buffered jobs.
// Context is nil (NewWorkerPool substitutes context.Background) and
// Discard is false, so pending jobs are drained on shutdown.
func NewDefaultConfig() *Config {
	cfg := new(Config)
	cfg.ChannelNum = 1
	cfg.WorkerNum = 1
	cfg.Capacity = 8192
	return cfg
}

// Start launches WorkerNum worker goroutines for every channel in the pool.
//
// The WaitGroup is incremented here, before each goroutine is spawned, so a
// Stop issued right after Start cannot observe a zero counter while workers
// are still being launched (previously the Add happened inside working, on
// the spawned goroutine, which raced with wg.Wait). working performs its own
// balanced Add/Done pair, so the counter stays consistent either way.
func (p *WorkerPool) Start() {
	for _, c := range p.chanList {
		for i := int8(0); i < p.config.WorkerNum; i++ {
			_c := c // per-iteration copy for the closure (pre-Go 1.22 loop-capture safety)
			p.wg.Add(1)
			go frameworkRecover.WithRecover(func() {
				defer p.wg.Done()
				p.working(_c)
			})
		}
	}
}

// Stop shuts the pool down: it cancels the pool context, releases the
// backup goroutine pool if one was ever created, and blocks until all
// worker goroutines have exited.
//
// NOTE(review): p.backupPool is read here without synchronization while
// Submit may lazily create it — confirm Stop is never raced with Submit.
func (p *WorkerPool) Stop() {
	p.cancel()

	if bp := p.backupPool; bp != nil {
		bp.Release()
	}

	p.wg.Wait()
}

// Submit routes job onto the channel selected by hash (mod ChannelNum;
// negative hashes are normalized so the index is always in range). The job
// runs under a context detached from ctx's cancellation. If the target
// channel is full, the job is parked in a small non-blocking backup pool
// that retries the send until it succeeds or the pool is stopped.
//
// Note: because workers may close the channel on shutdown, a concurrent
// send can panic with very low probability — hence the deferred crash logger.
func (p *WorkerPool) Submit(ctx context.Context, job Job, hash int64) {
	defer logxx.LogCrash()

	channel := hash % int64(p.config.ChannelNum)
	if channel < 0 {
		// Go's % keeps the dividend's sign; a negative hash would otherwise
		// produce a negative index and panic on p.chanList[channel].
		channel += int64(p.config.ChannelNum)
	}
	wctx := context.WithoutCancel(ctx)
	jobWrap := func() {
		job(wctx)
	}

	select {
	case <-p.ctx.Done():
		// Pool already stopped: drop the job.
	case p.chanList[channel].c <- jobWrap:
	default:
		// Channel full: record the job as waiting and hand delivery to the backup pool.
		nowTimeMs := time.Now().UnixMilli()
		atomic.AddInt32(&p.waitingLen, 1)

		logx.Errorf(
			"worker pool is full with waitingLen:%d, channel:%d, config:%+v",
			atomic.LoadInt32(&p.waitingLen), // atomic load: waitingLen is mutated concurrently
			channel,
			p.config,
		)

		task := func() {
			select {
			case <-p.ctx.Done():
				// Pool stopped while waiting: the job is abandoned, so it no
				// longer counts as waiting (previously the counter leaked here).
				atomic.AddInt32(&p.waitingLen, -1)
				logx.Errorf(
					"cancel submit job to worker pool channel with waitingTime:%d ms, waitingLen:%d, channel:%d, config:%+v",
					time.Now().UnixMilli()-nowTimeMs,
					atomic.LoadInt32(&p.waitingLen),
					channel,
					p.config,
				)
			case p.chanList[channel].c <- jobWrap:
				atomic.AddInt32(&p.waitingLen, -1)
				logx.Errorf(
					"worker pool is full with waitingTime:%d ms, waitingLen:%d, channel:%d, config:%+v",
					time.Now().UnixMilli()-nowTimeMs,
					atomic.LoadInt32(&p.waitingLen),
					channel,
					p.config,
				)
			}
		}

		// NOTE(review): this lazy init is racy under concurrent Submit calls —
		// two goroutines can both observe nil and create separate pools.
		// Consider creating backupPool in NewWorkerPool, or guarding with
		// sync.Once; fixing it here would require a new struct field.
		if p.backupPool == nil {
			ap, _ := ants.NewPool(4, ants.WithNonblocking(true))
			p.backupPool = ap
		}

		if err := p.backupPool.Submit(task); err != nil {
			// Backup pool saturated or released: fall back to a raw goroutine.
			logx.Errorf("backup pool submit error with err:%s", err)
			go frameworkRecover.WithRecover(task)
		}
	}
}

// working is the worker loop for a single channel: it executes jobs until
// the pool context is canceled, then — unless Discard is set — closes and
// drains the channel so no already-accepted job is lost.
//
// NOTE(review): this Add runs on the worker goroutine itself; unless the
// spawner also increments the WaitGroup before launching, a Stop issued
// immediately after Start can call wg.Wait before any Add lands and return
// while workers are still running — confirm the spawner covers this.
func (p *WorkerPool) working(ch *Chan) {
	p.wg.Add(1)
	defer p.wg.Done()

	for {
		select {
		case <-p.ctx.Done():
			if !p.config.Discard {
				// Drain mode: several workers share ch, so close exactly once
				// via the sync.Once, then run everything still buffered.
				// NOTE(review): closing here can race with a concurrent
				// Submit send (panic on send to closed channel) — Submit
				// guards against this with a deferred recover.
				ch.stop.Do(func() {
					close(ch.c)
				})
				for job := range ch.c {
					if job != nil {
						job()
					}
				}
			}

			return
		case job, ok := <-ch.c:
			if !ok {
				// Channel was closed by a sibling worker's drain; exit.
				return
			}

			if job != nil {
				job()
			}
		}
	}
}
